/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

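/* AT_HWCAP2 may be missing from older system headers; the value below
   is the one fixed by the Linux UAPI (<linux/auxvec.h>).  */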
#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

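/* If SUPPORTS_READ_OFFSETS ends up defined, gdbserver can report the
   text/data relocation offsets of a no-MMU inferior to GDB (the
   qOffsets packet) by reading the PT_*_ADDR slots out of the
   inferior's user area.  */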
#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* Return TRUE if THREAD is the leader thread of the process.  */
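/* (On Linux, the leader is the thread whose LWP id equals the
   process id, the tgid: e.g., ptid_t (1234, 1234) names the leader,
   while ptid_t (1234, 1235) names a secondary thread.)  */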

static bool
is_leader (thread_info *thread)
{
  ptid_t ptid = ptid_of (thread);
  return ptid.pid () == ptid.lwp ();
}

/* Return true if we should report thread exit events to GDB, for
   THR.  */

static bool
report_exit_events_for (thread_info *thr)
{
  client_state &cs = get_client_state ();

  return (cs.report_thread_events
          || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
}

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
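
/* For example: when waitpid reports a stop for a PID that is not yet
   in our LWP list (a fork/vfork/clone child stopping before the
   parent's ptrace event has been processed), the status is stashed
   on this list, and handle_extended_wait later claims it with
   pull_pid_from_list instead of calling waitpid again.  */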

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe.is_open ())
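
/* Async mode works by registering linux_event_pipe with the event
   loop: writing to the pipe (async_file_mark, declared below) makes
   it readable, which wakes the loop and prompts it to ask this
   target for a pending event; the pipe is drained again once the
   pending events have been reported.  */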

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF header, 0 if it is a 32-bit ELF
   header, and -1 if it is not an ELF header at all.  Store the ELF
   machine in *MACHINE, or EM_NONE if HEADER is not ELF.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

/* Open the /proc/PID/mem file for PROC.  */

static void
open_proc_mem_file (process_info *proc)
{
  gdb_assert (proc->priv->mem_fd == -1);

  char filename[64];
  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);

  proc->priv->mem_fd
    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
}

process_info *
linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();
  proc->priv->mem_fd = -1;

  return proc;
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  process_info *proc = add_linux_process_no_mem_file (pid, attached);
  open_proc_mem_file (proc);
  return proc;
}

void
linux_process_target::remove_linux_process (process_info *proc)
{
  if (proc->priv->mem_fd >= 0)
    close (proc->priv->mem_fd);

  this->low_delete_process (proc->priv->arch_private);

  xfree (proc->priv);
  proc->priv = nullptr;

  remove_process (proc);
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

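/* Handle a GNU/Linux extended wait response.  Most ptrace extended
   events (fork/vfork/clone/vfork-done/exec) are processed here.
   Returns 0 if the event should be reported to GDB (possibly after
   rewriting *ORIG_EVENT_LWP, as in the exec case), or 1 if it was
   handled internally and should be swallowed, e.g. a clone that GDB
   did not ask to hear about.  */
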
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (debug_threads)
        {
          debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
                        (event == PTRACE_EVENT_FORK ? "fork"
                         : event == PTRACE_EVENT_VFORK ? "vfork"
                         : event == PTRACE_EVENT_CLONE ? "clone"
                         : "???"),
                        ptid_of (event_thr).lwp (),
                        new_pid);
        }

      ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
                           ? ptid_t (new_pid, new_pid)
                           : ptid_t (ptid_of (event_thr).pid (), new_pid));

      lwp_info *child_lwp = add_lwp (child_ptid);
      gdb_assert (child_lwp != NULL);
      child_lwp->stopped = 1;
      if (event != PTRACE_EVENT_CLONE)
        child_lwp->must_set_ptrace_flags = 1;
      child_lwp->status_pending_p = 0;

      thread_info *child_thr = get_lwp_thread (child_lwp);

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        {
          threads_debug_printf ("leaving child suspended");
          child_lwp->suspended = 1;
        }

      if (event_lwp->bp_reinsert != 0
          && supports_software_single_step ()
          && event == PTRACE_EVENT_VFORK)
        {
          /* If we leave single-step breakpoints there, child will
             hit it, so uninsert single-step breakpoints from parent
             (and child).  Once vfork child is done, reinsert
             them back to parent.  */
          uninsert_single_step_breakpoints (event_thr);
        }

      if (event != PTRACE_EVENT_CLONE)
        {
          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          process_info *child_proc = add_linux_process (new_pid, 0);
          gdb_assert (child_proc != NULL);

          process_info *parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          clone_all_breakpoints (child_thr, event_thr);

          target_desc_up tdesc = allocate_target_description ();
          copy_target_description (tdesc.get (), parent_proc->tdesc);
          child_proc->tdesc = tdesc.release ();

          /* Clone arch-specific process data.  */
          low_new_fork (parent_proc, child_proc);
        }

      /* Save fork/clone info in the parent thread.  */
      if (event == PTRACE_EVENT_FORK)
        event_lwp->waitstatus.set_forked (child_ptid);
      else if (event == PTRACE_EVENT_VFORK)
        event_lwp->waitstatus.set_vforked (child_ptid);
      else if (event == PTRACE_EVENT_CLONE
               && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
        event_lwp->waitstatus.set_thread_cloned (child_ptid);

      if (event != PTRACE_EVENT_CLONE
          || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
        {
          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled, the
             handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent's event is passed on to
             GDB.  */
          event_lwp->relative = child_lwp;
          child_lwp->relative = event_lwp;
        }

      /* If the parent thread is doing step-over with single-step
         breakpoints, the list of single-step breakpoints are cloned
         from the parent's.  Remove them from the child process.
         In case of vfork, we'll reinsert them back once vforked
         child is done.  */
      if (event_lwp->bp_reinsert != 0
          && supports_software_single_step ())
        {
          /* The child process is forked and stopped, so it is safe
             to access its memory without stopping all other threads
             from other processes.  */
          delete_single_step_breakpoints (child_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
          gdb_assert (!has_single_step_breakpoints (child_thr));
        }

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          child_lwp->stop_expected = 1;
          child_lwp->status_pending_p = 1;
          child_lwp->status_pending = status;
        }
      else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
        {
          child_lwp->waitstatus.set_thread_created ();
          child_lwp->status_pending_p = 1;
          child_lwp->status_pending = status;
        }

      if (event == PTRACE_EVENT_CLONE)
        {
#ifdef USE_THREAD_DB
          thread_db_notice_clone (event_thr, child_ptid);
#endif
        }

      if (event == PTRACE_EVENT_CLONE
          && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
        {
          threads_debug_printf
            ("not reporting clone event from LWP %ld, new child is %ld\n",
             ptid_of (event_thr).lwp (),
             new_pid);
          return 1;
        }

      /* Leave the child stopped until GDB processes the parent
         event.  */
      child_thr->last_resume_kind = resume_stop;
      child_thr->last_status.set_stopped (GDB_SIGNAL_0);

      /* Report the event.  */
      threads_debug_printf
        ("reporting %s event from LWP %ld, new child is %ld\n",
         (event == PTRACE_EVENT_FORK ? "fork"
          : event == PTRACE_EVENT_VFORK ? "vfork"
          : event == PTRACE_EVENT_CLONE ? "clone"
          : "???"),
         ptid_of (event_thr).lwp (),
         new_pid);
      return 0;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
                            lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
        (make_unique_xstrdup
         (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  gdb_assert (!proc->starting_up);

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  struct regcache *regcache = get_thread_regcache (current_thread, 1);
  CORE_ADDR pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  process_info *proc = get_thread_process (get_lwp_thread (lwp));
  if (proc->starting_up)
    {
      /* Claim we have the stop PC so that the caller doesn't try to
         fetch it itself.  */
      return true;
    }

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
        ("%s stopped by software breakpoint",
         target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  /* When spawning a new process, we can't open the mem file yet.  We
     still have to nurse the process through the shell, and that execs
     a couple times.  The address space a /proc/PID/mem file is
     accessing is destroyed on exec.  */
  process_info *proc = add_linux_process_no_mem_file (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  /* PROC is now past the shell running the program we want, so we can
     open the /proc/PID/mem file.  */
  open_proc_mem_file (proc);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        threads_debug_printf
          ("Cannot attach to lwp %d: thread is gone (%d: %s)",
           lwpid, err, safe_strerror (err));
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  /* Delay opening the /proc/PID/mem file until we've successfully
     attached.  */
  proc = add_linux_process_no_mem_file (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      this->remove_linux_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  open_proc_mem_file (proc);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  gdb_assert (initial_thread != nullptr);
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));
      gdb_assert (lwp != nullptr);

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

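/* Return non-zero if the process with id PID has no more than one
   thread left in our thread list.  */
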
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
                            target_pid_to_str (ptid_of (thr)).c_str (),
                            save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
                            target_pid_to_str (ptid_of (thr)).c_str (),
                            save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
                            target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
          || thread->last_status.sig () == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
                            target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
                            "status: no pending signal",
                            target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
                            "but we don't know if we should pass it.  "
                            "Default to not.",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
                            target_pid_to_str (ptid_of (thread)).c_str (),
                            gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
                            target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)).c_str (),
                 safe_strerror (save_errno));
        }
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
                          target_pid_to_str (ptid_of (thread)).c_str (),
                          strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
         If the thread group exits, we must reap the zombie clone lwps
         before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
        return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  gdb_assert (main_lwp != nullptr);
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  this->remove_linux_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
        break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
        {
          threads_debug_printf ("PC of %ld changed",
                                lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !low_breakpoint_at (pc))
        {
          threads_debug_printf ("previous SW breakpoint of %ld gone",
                                lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          threads_debug_printf ("previous HW breakpoint of %ld gone",
                                lwpid_of (thread));
          discard = 1;
        }
#endif

      if (discard)
        {
          threads_debug_printf ("discarding pending breakpoint status");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

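/* Return the lwp_info whose LWP id matches PTID's lwp field or, when
   PTID's lwp field is zero, PTID's pid field (the thread group
   leader).  Return NULL if no such LWP is known.  */
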
1757 struct lwp_info *
1758 find_lwp_pid (ptid_t ptid)
1759 {
1760 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1761 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1762 {
1763 return thr_arg->id.lwp () == lwp;
1764 });
1765
1766 if (thread == NULL)
1767 return NULL;
1768
1769 return get_thread_lwp (thread);
1770 }
1771
1772 /* Return the number of known LWPs in the tgid given by PID. */
1773
1774 static int
1775 num_lwps (int pid)
1776 {
1777 int count = 0;
1778
1779 for_each_thread (pid, [&] (thread_info *thread)
1780 {
1781 count++;
1782 });
1783
1784 return count;
1785 }
1786
1787 /* See nat/linux-nat.h. */
1788
1789 struct lwp_info *
1790 iterate_over_lwps (ptid_t filter,
1791 gdb::function_view<iterate_over_lwps_ftype> callback)
1792 {
1793 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1794 {
1795 lwp_info *lwp = get_thread_lwp (thr_arg);
1796
1797 return callback (lwp);
1798 });
1799
1800 if (thread == NULL)
1801 return NULL;
1802
1803 return get_thread_lwp (thread);
1804 }
1805
1806 void
1807 linux_process_target::check_zombie_leaders ()
1808 {
1809 for_each_process ([this] (process_info *proc)
1810 {
1811 pid_t leader_pid = pid_of (proc);
1812 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1813
1814 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1815 "num_lwps=%d, zombie=%d",
1816 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1817 linux_proc_pid_is_zombie (leader_pid));
1818
1819 if (leader_lp != NULL && !leader_lp->stopped
1820 /* Check if there are other threads in the group, as we may
1821 have raced with the inferior simply exiting. Note this
1822 isn't a watertight check. If the inferior is
1823 multi-threaded and is exiting, it may be we see the
1824 leader as zombie before we reap all the non-leader
1825 threads. See comments below. */
1826 && !last_thread_of_process_p (leader_pid)
1827 && linux_proc_pid_is_zombie (leader_pid))
1828 {
1829 /* A zombie leader in a multi-threaded program can mean one
1830 of three things:
1831
1832 #1 - Only the leader exited, not the whole program, e.g.,
1833 with pthread_exit. Since we can't reap the leader's exit
1834 status until all other threads are gone and reaped too,
1835 we want to delete the zombie leader right away, as it
1836 can't be debugged, we can't read its registers, etc.
1837 This is the main reason we check for zombie leaders
1838 disappearing.
1839
1840 #2 - The whole thread-group/process exited (a group exit,
1841 via e.g. exit(3), and there is (or will be shortly) an
1842 exit reported for each thread in the process, and then
1843 finally an exit for the leader once the non-leaders are
1844 reaped.
1845
1846 #3 - There are 3 or more threads in the group, and a
1847 thread other than the leader exec'd. See comments on
1848 exec events at the top of the file.
1849
1850 Ideally we would never delete the leader for case #2.
1851 Instead, we want to collect the exit status of each
1852 non-leader thread, and then finally collect the exit
1853 status of the leader as normal and use its exit code as
1854 whole-process exit code. Unfortunately, there's no
1855 race-free way to distinguish cases #1 and #2. We can't
1856 assume the exit events for the non-leaders threads are
1857 already pending in the kernel, nor can we assume the
1858 non-leader threads are in zombie state already. Between
1859 the leader becoming zombie and the non-leaders exiting
1860 and becoming zombie themselves, there's a small time
1861 window, so such a check would be racy. Temporarily
1862 pausing all threads and checking to see if all threads
1863 exit or not before re-resuming them would work in the
1864 case that all threads are running right now, but it
1865 wouldn't work if some thread is currently already
1866 ptrace-stopped, e.g., due to scheduler-locking.
1867
1868 So what we do is we delete the leader anyhow, and then
1869 	 later on when we see its exit status, we re-add it.
1870 We also make sure that we only report a whole-process
1871 exit when we see the leader exiting, as opposed to when
1872 the last LWP in the LWP list exits, which can be a
1873 non-leader if we deleted the leader here. */
1874 threads_debug_printf ("Thread group leader %d zombie "
1875 "(it exited, or another thread execd), "
1876 "deleting it.",
1877 leader_pid);
1878 delete_lwp (leader_lp);
1879 }
1880 });
1881 }
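
/* For reference, a minimal sketch of the check linux_proc_pid_is_zombie
   performs: a zombie task reports 'Z' on the "State:" line of
   /proc/PID/status.  This illustrates the idea only; the real
   implementation lives in nat/linux-procfs.c.

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], line[256];
       int zombie = 0;

       snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
       FILE *f = fopen (path, "r");
       if (f == NULL)
	 return 0;
       while (fgets (line, sizeof (line), f) != NULL)
	 if (strncmp (line, "State:", 6) == 0)
	   {
	     // The state letter follows the tag and whitespace.
	     zombie = (strchr (line + 6, 'Z') != NULL);
	     break;
	   }
       fclose (f);
       return zombie;
     }  */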
1882
1883 /* Callback for `find_thread'. Returns the first LWP that is not
1884 stopped. */
1885
1886 static bool
1887 not_stopped_callback (thread_info *thread, ptid_t filter)
1888 {
1889 if (!thread->id.matches (filter))
1890 return false;
1891
1892 lwp_info *lwp = get_thread_lwp (thread);
1893
1894 return !lwp->stopped;
1895 }
1896
1897 /* Increment LWP's suspend count. */
1898
1899 static void
1900 lwp_suspended_inc (struct lwp_info *lwp)
1901 {
1902 lwp->suspended++;
1903
1904 if (lwp->suspended > 4)
1905 threads_debug_printf
1906 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1907 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1908 }
1909
1910 /* Decrement LWP's suspend count. */
1911
1912 static void
1913 lwp_suspended_decr (struct lwp_info *lwp)
1914 {
1915 lwp->suspended--;
1916
1917 if (lwp->suspended < 0)
1918 {
1919 struct thread_info *thread = get_lwp_thread (lwp);
1920
1921 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1922 lwp->suspended);
1923 }
1924 }
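
/* The suspend count is a balance counter: every lwp_suspended_inc must
   eventually be paired with a lwp_suspended_decr, once the operation
   that must not resume the LWP has finished.  Schematically:

     lwp_suspended_inc (lwp);   // keep LWP stopped across the operation
     // ... work that may pause and unpause all threads ...
     lwp_suspended_decr (lwp);  // rebalance; going negative is a bug

   handle_tracepoints below uses exactly this bracket around the
   tracepoint collect actions.  */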
1925
1926 /* This function should only be called if the LWP got a SIGTRAP.
1927
1928    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1929    event was handled, 0 otherwise.  */
1930
1931 static int
1932 handle_tracepoints (struct lwp_info *lwp)
1933 {
1934 struct thread_info *tinfo = get_lwp_thread (lwp);
1935 int tpoint_related_event = 0;
1936
1937 gdb_assert (lwp->suspended == 0);
1938
1939 /* If this tracepoint hit causes a tracing stop, we'll immediately
1940 uninsert tracepoints. To do this, we temporarily pause all
1941 threads, unpatch away, and then unpause threads. We need to make
1942 sure the unpausing doesn't resume LWP too. */
1943 lwp_suspended_inc (lwp);
1944
1945 /* And we need to be sure that any all-threads-stopping doesn't try
1946 to move threads out of the jump pads, as it could deadlock the
1947 inferior (LWP could be in the jump pad, maybe even holding the
1948      lock).  */
1949
1950 /* Do any necessary step collect actions. */
1951 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1952
1953 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1954
1955 /* See if we just hit a tracepoint and do its main collect
1956 actions. */
1957 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1958
1959 lwp_suspended_decr (lwp);
1960
1961 gdb_assert (lwp->suspended == 0);
1962 gdb_assert (!stabilizing_threads
1963 || (lwp->collecting_fast_tracepoint
1964 != fast_tpoint_collect_result::not_collecting));
1965
1966 if (tpoint_related_event)
1967 {
1968 threads_debug_printf ("got a tracepoint event");
1969 return 1;
1970 }
1971
1972 return 0;
1973 }
1974
1975 fast_tpoint_collect_result
1976 linux_process_target::linux_fast_tracepoint_collecting
1977 (lwp_info *lwp, fast_tpoint_collect_status *status)
1978 {
1979 CORE_ADDR thread_area;
1980 struct thread_info *thread = get_lwp_thread (lwp);
1981
1982 /* Get the thread area address. This is used to recognize which
1983 thread is which when tracing with the in-process agent library.
1984 We don't read anything from the address, and treat it as opaque;
1985 it's the address itself that we assume is unique per-thread. */
1986 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1987 return fast_tpoint_collect_result::not_collecting;
1988
1989 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1990 }
1991
1992 int
1993 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1994 {
1995 return -1;
1996 }
1997
1998 bool
1999 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2000 {
2001 scoped_restore_current_thread restore_thread;
2002 switch_to_thread (get_lwp_thread (lwp));
2003
2004 if ((wstat == NULL
2005 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2006 && supports_fast_tracepoints ()
2007 && agent_loaded_p ())
2008 {
2009 struct fast_tpoint_collect_status status;
2010
2011 threads_debug_printf
2012 ("Checking whether LWP %ld needs to move out of the jump pad.",
2013 lwpid_of (current_thread));
2014
2015 fast_tpoint_collect_result r
2016 = linux_fast_tracepoint_collecting (lwp, &status);
2017
2018 if (wstat == NULL
2019 || (WSTOPSIG (*wstat) != SIGILL
2020 && WSTOPSIG (*wstat) != SIGFPE
2021 && WSTOPSIG (*wstat) != SIGSEGV
2022 && WSTOPSIG (*wstat) != SIGBUS))
2023 {
2024 lwp->collecting_fast_tracepoint = r;
2025
2026 if (r != fast_tpoint_collect_result::not_collecting)
2027 {
2028 if (r == fast_tpoint_collect_result::before_insn
2029 && lwp->exit_jump_pad_bkpt == NULL)
2030 {
2031 /* Haven't executed the original instruction yet.
2032 Set breakpoint there, and wait till it's hit,
2033 then single-step until exiting the jump pad. */
2034 lwp->exit_jump_pad_bkpt
2035 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2036 }
2037
2038 threads_debug_printf
2039 ("Checking whether LWP %ld needs to move out of the jump pad..."
2040 " it does", lwpid_of (current_thread));
2041
2042 return true;
2043 }
2044 }
2045 else
2046 {
2047 /* If we get a synchronous signal while collecting, *and*
2048 while executing the (relocated) original instruction,
2049 reset the PC to point at the tpoint address, before
2050 reporting to GDB. Otherwise, it's an IPA lib bug: just
2051 report the signal to GDB, and pray for the best. */
2052
2053 lwp->collecting_fast_tracepoint
2054 = fast_tpoint_collect_result::not_collecting;
2055
2056 if (r != fast_tpoint_collect_result::not_collecting
2057 && (status.adjusted_insn_addr <= lwp->stop_pc
2058 && lwp->stop_pc < status.adjusted_insn_addr_end))
2059 {
2060 siginfo_t info;
2061 struct regcache *regcache;
2062
2063 /* The si_addr on a few signals references the address
2064 of the faulting instruction. Adjust that as
2065 well. */
2066 if ((WSTOPSIG (*wstat) == SIGILL
2067 || WSTOPSIG (*wstat) == SIGFPE
2068 || WSTOPSIG (*wstat) == SIGBUS
2069 || WSTOPSIG (*wstat) == SIGSEGV)
2070 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2071 (PTRACE_TYPE_ARG3) 0, &info) == 0
2072 /* Final check just to make sure we don't clobber
2073 the siginfo of non-kernel-sent signals. */
2074 && (uintptr_t) info.si_addr == lwp->stop_pc)
2075 {
2076 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2077 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2078 (PTRACE_TYPE_ARG3) 0, &info);
2079 }
2080
2081 regcache = get_thread_regcache (current_thread, 1);
2082 low_set_pc (regcache, status.tpoint_addr);
2083 lwp->stop_pc = status.tpoint_addr;
2084
2085 /* Cancel any fast tracepoint lock this thread was
2086 holding. */
2087 force_unlock_trace_buffer ();
2088 }
2089
2090 if (lwp->exit_jump_pad_bkpt != NULL)
2091 {
2092 threads_debug_printf
2093 		("Cancelling fast exit-jump-pad: removing bkpt. "
2094 "stopping all threads momentarily.");
2095
2096 stop_all_lwps (1, lwp);
2097
2098 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2099 lwp->exit_jump_pad_bkpt = NULL;
2100
2101 unstop_all_lwps (1, lwp);
2102
2103 gdb_assert (lwp->suspended >= 0);
2104 }
2105 }
2106 }
2107
2108 threads_debug_printf
2109 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2110 lwpid_of (current_thread));
2111
2112 return false;
2113 }
2114
2115 /* Enqueue one signal in the "signals to report later when out of the
2116 jump pad" list. */
2117
2118 static void
2119 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2120 {
2121 struct thread_info *thread = get_lwp_thread (lwp);
2122
2123 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2124 WSTOPSIG (*wstat), lwpid_of (thread));
2125
2126 if (debug_threads)
2127 {
2128 for (const auto &sig : lwp->pending_signals_to_report)
2129 threads_debug_printf (" Already queued %d", sig.signal);
2130
2131 threads_debug_printf (" (no more currently queued signals)");
2132 }
2133
2134 /* Don't enqueue non-RT signals if they are already in the deferred
2135 queue. (SIGSTOP being the easiest signal to see ending up here
2136 twice) */
2137 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2138 {
2139 for (const auto &sig : lwp->pending_signals_to_report)
2140 {
2141 if (sig.signal == WSTOPSIG (*wstat))
2142 {
2143 threads_debug_printf
2144 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2145 sig.signal, lwpid_of (thread));
2146 return;
2147 }
2148 }
2149 }
2150
2151 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2152
2153 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2154 &lwp->pending_signals_to_report.back ().info);
2155 }
2156
2157 /* Dequeue one signal from the "signals to report later when out of
2158 the jump pad" list. */
2159
2160 static int
2161 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2162 {
2163 struct thread_info *thread = get_lwp_thread (lwp);
2164
2165 if (!lwp->pending_signals_to_report.empty ())
2166 {
2167 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2168
2169 *wstat = W_STOPCODE (p_sig.signal);
2170 if (p_sig.info.si_signo != 0)
2171 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2172 &p_sig.info);
2173
2174 lwp->pending_signals_to_report.pop_front ();
2175
2176 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2177 WSTOPSIG (*wstat), lwpid_of (thread));
2178
2179 if (debug_threads)
2180 {
2181 for (const auto &sig : lwp->pending_signals_to_report)
2182 threads_debug_printf (" Still queued %d", sig.signal);
2183
2184 threads_debug_printf (" (no more queued signals)");
2185 }
2186
2187 return 1;
2188 }
2189
2190 return 0;
2191 }
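
/* For reference: W_STOPCODE builds a wait status in the encoding
   waitpid produces, so the status synthesized above round-trips
   through the usual WIFSTOPPED/WSTOPSIG macros.  With the common
   glibc encoding (an assumption; the macro itself is authoritative):

     int wstat = W_STOPCODE (SIGUSR1);   // (SIGUSR1 << 8) | 0x7f
     gdb_assert (WIFSTOPPED (wstat));
     gdb_assert (WSTOPSIG (wstat) == SIGUSR1);  */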
2192
2193 bool
2194 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2195 {
2196 scoped_restore_current_thread restore_thread;
2197 switch_to_thread (get_lwp_thread (child));
2198
2199 if (low_stopped_by_watchpoint ())
2200 {
2201 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2202 child->stopped_data_address = low_stopped_data_address ();
2203 }
2204
2205 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2206 }
2207
2208 bool
2209 linux_process_target::low_stopped_by_watchpoint ()
2210 {
2211 return false;
2212 }
2213
2214 CORE_ADDR
2215 linux_process_target::low_stopped_data_address ()
2216 {
2217 return 0;
2218 }
2219
2220 /* Return the ptrace options that we want to try to enable. */
2221
2222 static int
2223 linux_low_ptrace_options (int attached)
2224 {
2225 client_state &cs = get_client_state ();
2226 int options = 0;
2227
2228 if (!attached)
2229 options |= PTRACE_O_EXITKILL;
2230
2231 if (cs.report_fork_events)
2232 options |= PTRACE_O_TRACEFORK;
2233
2234 if (cs.report_vfork_events)
2235 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2236
2237 if (cs.report_exec_events)
2238 options |= PTRACE_O_TRACEEXEC;
2239
2240 options |= PTRACE_O_TRACESYSGOOD;
2241
2242 return options;
2243 }
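
/* These options only take effect once installed on a stopped tracee
   with PTRACE_SETOPTIONS; filter_event below arranges that via
   linux_enable_event_reporting.  The raw mechanism boils down to this
   sketch (error handling elided):

     int options = linux_low_ptrace_options (attached);

     if (ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
		 (PTRACE_TYPE_ARG4) (uintptr_t) options) != 0)
       perror ("PTRACE_SETOPTIONS");  */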
2244
2245 void
2246 linux_process_target::filter_event (int lwpid, int wstat)
2247 {
2248 struct lwp_info *child;
2249 struct thread_info *thread;
2250 int have_stop_pc = 0;
2251
2252 child = find_lwp_pid (ptid_t (lwpid));
2253
2254 /* Check for events reported by anything not in our LWP list. */
2255 if (child == nullptr)
2256 {
2257 if (WIFSTOPPED (wstat))
2258 {
2259 if (WSTOPSIG (wstat) == SIGTRAP
2260 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2261 {
2262 	      /* A non-leader thread exec'd after we've seen the
2263 leader zombie, and removed it from our lists (in
2264 check_zombie_leaders). The non-leader thread changes
2265 its tid to the tgid. */
2266 threads_debug_printf
2267 ("Re-adding thread group leader LWP %d after exec.",
2268 lwpid);
2269
2270 child = add_lwp (ptid_t (lwpid, lwpid));
2271 child->stopped = 1;
2272 switch_to_thread (child->thread);
2273 }
2274 else
2275 {
2276 /* A process we are controlling has forked and the new
2277 child's stop was reported to us by the kernel. Save
2278 its PID and go back to waiting for the fork event to
2279 be reported - the stopped process might be returned
2280 from waitpid before or after the fork event is. */
2281 threads_debug_printf
2282 ("Saving LWP %d status %s in stopped_pids list",
2283 lwpid, status_to_str (wstat).c_str ());
2284 add_to_pid_list (&stopped_pids, lwpid, wstat);
2285 }
2286 }
2287 else
2288 {
2289 /* Don't report an event for the exit of an LWP not in our
2290 list, i.e. not part of any inferior we're debugging.
2291 This can happen if we detach from a program we originally
2292 forked and then it exits. However, note that we may have
2293 earlier deleted a leader of an inferior we're debugging,
2294 in check_zombie_leaders. Re-add it back here if so. */
2295 find_process ([&] (process_info *proc)
2296 {
2297 if (proc->pid == lwpid)
2298 {
2299 threads_debug_printf
2300 ("Re-adding thread group leader LWP %d after exit.",
2301 lwpid);
2302
2303 child = add_lwp (ptid_t (lwpid, lwpid));
2304 return true;
2305 }
2306 return false;
2307 });
2308 }
2309
2310 if (child == nullptr)
2311 return;
2312 }
2313
2314 thread = get_lwp_thread (child);
2315
2316 child->stopped = 1;
2317
2318 child->last_status = wstat;
2319
2320 /* Check if the thread has exited. */
2321 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2322 {
2323 threads_debug_printf ("%d exited", lwpid);
2324
2325 if (finish_step_over (child))
2326 {
2327 /* Unsuspend all other LWPs, and set them back running again. */
2328 unsuspend_all_lwps (child);
2329 }
2330
2331 /* If this is not the leader LWP, then the exit signal was not
2332 the end of the debugged application and should be ignored,
2333 unless GDB wants to hear about thread exits. */
2334 if (report_exit_events_for (thread) || is_leader (thread))
2335 {
2336 	  /* Events are serialized to the GDB core, and we can't
2337 	     report this one right now; leave the status pending for
2338 	     the next time we're able to report it.  */
2339 mark_lwp_dead (child, wstat);
2340 return;
2341 }
2342 else
2343 {
2344 delete_lwp (child);
2345 return;
2346 }
2347 }
2348
2349 gdb_assert (WIFSTOPPED (wstat));
2350
2351 if (WIFSTOPPED (wstat))
2352 {
2353 struct process_info *proc;
2354
2355 /* Architecture-specific setup after inferior is running. */
2356 proc = find_process_pid (pid_of (thread));
2357 if (proc->tdesc == NULL)
2358 {
2359 if (proc->attached)
2360 {
2361 /* This needs to happen after we have attached to the
2362 inferior and it is stopped for the first time, but
2363 before we access any inferior registers. */
2364 arch_setup_thread (thread);
2365 }
2366 else
2367 {
2368 /* The process is started, but GDBserver will do
2369 architecture-specific setup after the program stops at
2370 the first instruction. */
2371 child->status_pending_p = 1;
2372 child->status_pending = wstat;
2373 return;
2374 }
2375 }
2376 }
2377
2378 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2379 {
2380 struct process_info *proc = find_process_pid (pid_of (thread));
2381 int options = linux_low_ptrace_options (proc->attached);
2382
2383 linux_enable_event_reporting (lwpid, options);
2384 child->must_set_ptrace_flags = 0;
2385 }
2386
2387 /* Always update syscall_state, even if it will be filtered later. */
2388 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2389 {
2390 child->syscall_state
2391 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2392 ? TARGET_WAITKIND_SYSCALL_RETURN
2393 : TARGET_WAITKIND_SYSCALL_ENTRY);
2394 }
2395 else
2396 {
2397 /* Almost all other ptrace-stops are known to be outside of system
2398 calls, with further exceptions in handle_extended_wait. */
2399 child->syscall_state = TARGET_WAITKIND_IGNORE;
2400 }
2401
2402 /* Be careful to not overwrite stop_pc until save_stop_reason is
2403 called. */
2404 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2405 && linux_is_extended_waitstatus (wstat))
2406 {
2407 child->stop_pc = get_pc (child);
2408 if (handle_extended_wait (&child, wstat))
2409 {
2410 /* The event has been handled, so just return without
2411 reporting it. */
2412 return;
2413 }
2414 }
2415
2416 if (linux_wstatus_maybe_breakpoint (wstat))
2417 {
2418 if (save_stop_reason (child))
2419 have_stop_pc = 1;
2420 }
2421
2422 if (!have_stop_pc)
2423 child->stop_pc = get_pc (child);
2424
2425 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2426 && child->stop_expected)
2427 {
2428 threads_debug_printf ("Expected stop.");
2429
2430 child->stop_expected = 0;
2431
2432 if (thread->last_resume_kind == resume_stop)
2433 {
2434 /* We want to report the stop to the core. Treat the
2435 SIGSTOP as a normal event. */
2436 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2437 target_pid_to_str (ptid_of (thread)).c_str ());
2438 }
2439 else if (stopping_threads != NOT_STOPPING_THREADS)
2440 {
2441 /* Stopping threads. We don't want this SIGSTOP to end up
2442 pending. */
2443 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2444 target_pid_to_str (ptid_of (thread)).c_str ());
2445 return;
2446 }
2447 else
2448 {
2449 /* This is a delayed SIGSTOP. Filter out the event. */
2450 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2451 child->stepping ? "step" : "continue",
2452 target_pid_to_str (ptid_of (thread)).c_str ());
2453
2454 resume_one_lwp (child, child->stepping, 0, NULL);
2455 return;
2456 }
2457 }
2458
2459 child->status_pending_p = 1;
2460 child->status_pending = wstat;
2461 return;
2462 }
2463
2464 bool
2465 linux_process_target::maybe_hw_step (thread_info *thread)
2466 {
2467 if (supports_hardware_single_step ())
2468 return true;
2469 else
2470 {
2471 /* GDBserver must insert single-step breakpoint for software
2472 single step. */
2473 gdb_assert (has_single_step_breakpoints (thread));
2474 return false;
2475 }
2476 }
2477
2478 void
2479 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2480 {
2481 struct lwp_info *lp = get_thread_lwp (thread);
2482
2483 if (lp->stopped
2484 && !lp->suspended
2485 && !lp->status_pending_p
2486 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2487 {
2488 int step = 0;
2489
2490 if (thread->last_resume_kind == resume_step)
2491 {
2492 if (supports_software_single_step ())
2493 install_software_single_step_breakpoints (lp);
2494
2495 step = maybe_hw_step (thread);
2496 }
2497
2498 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2499 target_pid_to_str (ptid_of (thread)).c_str (),
2500 paddress (lp->stop_pc), step);
2501
2502 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2503 }
2504 }
2505
2506 int
2507 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2508 ptid_t filter_ptid,
2509 int *wstatp, int options)
2510 {
2511 struct thread_info *event_thread;
2512 struct lwp_info *event_child, *requested_child;
2513 sigset_t block_mask, prev_mask;
2514
2515 retry:
2516 /* N.B. event_thread points to the thread_info struct that contains
2517 event_child. Keep them in sync. */
2518 event_thread = NULL;
2519 event_child = NULL;
2520 requested_child = NULL;
2521
2522 /* Check for a lwp with a pending status. */
2523
2524 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2525 {
2526 event_thread = find_thread_in_random ([&] (thread_info *thread)
2527 {
2528 return status_pending_p_callback (thread, filter_ptid);
2529 });
2530
2531 if (event_thread != NULL)
2532 {
2533 event_child = get_thread_lwp (event_thread);
2534 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2535 }
2536 }
2537 else if (filter_ptid != null_ptid)
2538 {
2539 requested_child = find_lwp_pid (filter_ptid);
2540 gdb_assert (requested_child != nullptr);
2541
2542 if (stopping_threads == NOT_STOPPING_THREADS
2543 && requested_child->status_pending_p
2544 && (requested_child->collecting_fast_tracepoint
2545 != fast_tpoint_collect_result::not_collecting))
2546 {
2547 enqueue_one_deferred_signal (requested_child,
2548 &requested_child->status_pending);
2549 requested_child->status_pending_p = 0;
2550 requested_child->status_pending = 0;
2551 resume_one_lwp (requested_child, 0, 0, NULL);
2552 }
2553
2554 if (requested_child->suspended
2555 && requested_child->status_pending_p)
2556 {
2557 internal_error ("requesting an event out of a"
2558 " suspended child?");
2559 }
2560
2561 if (requested_child->status_pending_p)
2562 {
2563 event_child = requested_child;
2564 event_thread = get_lwp_thread (event_child);
2565 }
2566 }
2567
2568 if (event_child != NULL)
2569 {
2570 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2571 lwpid_of (event_thread),
2572 event_child->status_pending);
2573
2574 *wstatp = event_child->status_pending;
2575 event_child->status_pending_p = 0;
2576 event_child->status_pending = 0;
2577 switch_to_thread (event_thread);
2578 return lwpid_of (event_thread);
2579 }
2580
2581 /* But if we don't find a pending event, we'll have to wait.
2582
2583 We only enter this loop if no process has a pending wait status.
2584 Thus any action taken in response to a wait status inside this
2585 loop is responding as soon as we detect the status, not after any
2586 pending events. */
2587
2588 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2589 all signals while here. */
2590 sigfillset (&block_mask);
2591 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2592
2593 /* Always pull all events out of the kernel. We'll randomly select
2594 an event LWP out of all that have events, to prevent
2595 starvation. */
2596 while (event_child == NULL)
2597 {
2598 pid_t ret = 0;
2599
2600       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2601 quirks:
2602
2603 - If the thread group leader exits while other threads in the
2604 thread group still exist, waitpid(TGID, ...) hangs. That
2605 waitpid won't return an exit status until the other threads
2606 in the group are reaped.
2607
2608 - When a non-leader thread execs, that thread just vanishes
2609 without reporting an exit (so we'd hang if we waited for it
2610 explicitly in that case). The exec event is reported to
2611 the TGID pid. */
2612 errno = 0;
2613 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2614
2615 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2616 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2617
2618 if (ret > 0)
2619 {
2620 threads_debug_printf ("waitpid %ld received %s",
2621 (long) ret, status_to_str (*wstatp).c_str ());
2622
2623 /* Filter all events. IOW, leave all events pending. We'll
2624 randomly select an event LWP out of all that have events
2625 below. */
2626 filter_event (ret, *wstatp);
2627 /* Retry until nothing comes out of waitpid. A single
2628 SIGCHLD can indicate more than one child stopped. */
2629 continue;
2630 }
2631
2632 /* Now that we've pulled all events out of the kernel, resume
2633 LWPs that don't have an interesting event to report. */
2634 if (stopping_threads == NOT_STOPPING_THREADS)
2635 for_each_thread ([this] (thread_info *thread)
2636 {
2637 resume_stopped_resumed_lwps (thread);
2638 });
2639
2640 /* ... and find an LWP with a status to report to the core, if
2641 any. */
2642 event_thread = find_thread_in_random ([&] (thread_info *thread)
2643 {
2644 return status_pending_p_callback (thread, filter_ptid);
2645 });
2646
2647 if (event_thread != NULL)
2648 {
2649 event_child = get_thread_lwp (event_thread);
2650 *wstatp = event_child->status_pending;
2651 event_child->status_pending_p = 0;
2652 event_child->status_pending = 0;
2653 break;
2654 }
2655
2656 /* Check for zombie thread group leaders. Those can't be reaped
2657 until all other threads in the thread group are. */
2658 check_zombie_leaders ();
2659
2660 auto not_stopped = [&] (thread_info *thread)
2661 {
2662 return not_stopped_callback (thread, wait_ptid);
2663 };
2664
2665 /* If there are no resumed children left in the set of LWPs we
2666 want to wait for, bail. We can't just block in
2667 waitpid/sigsuspend, because lwps might have been left stopped
2668 in trace-stop state, and we'd be stuck forever waiting for
2669 their status to change (which would only happen if we resumed
2670 them). Even if WNOHANG is set, this return code is preferred
2671 over 0 (below), as it is more detailed. */
2672 if (find_thread (not_stopped) == NULL)
2673 {
2674 threads_debug_printf ("exit (no unwaited-for LWP)");
2675
2676 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2677 return -1;
2678 }
2679
2680 /* No interesting event to report to the caller. */
2681 if ((options & WNOHANG))
2682 {
2683 threads_debug_printf ("WNOHANG set, no event found");
2684
2685 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2686 return 0;
2687 }
2688
2689 /* Block until we get an event reported with SIGCHLD. */
2690 threads_debug_printf ("sigsuspend'ing");
2691
2692 sigsuspend (&prev_mask);
2693 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2694 goto retry;
2695 }
2696
2697 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2698
2699 switch_to_thread (event_thread);
2700
2701 return lwpid_of (event_thread);
2702 }
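
/* The loop above is the classic race-free "drain, then sleep" shape:
   block SIGCHLD, drain every pending event with waitpid (-1, WNOHANG),
   and only then atomically unblock-and-wait with sigsuspend, so a
   SIGCHLD delivered between the last waitpid and the sleep cannot be
   lost.  Reduced to a skeleton (a sketch, not the code above;
   events_pending_p is a hypothetical predicate):

     sigset_t block_mask, prev_mask;
     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
     for (;;)
       {
	 int status;
	 pid_t pid = waitpid (-1, &status, __WALL | WNOHANG);
	 if (pid > 0)
	   continue;                  // record the event; keep draining
	 if (events_pending_p ())
	   break;                     // something to report; stop waiting
	 sigsuspend (&prev_mask);     // sleep; SIGCHLD wakes us atomically
       }
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);  */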
2703
2704 int
2705 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2706 {
2707 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2708 }
2709
2710 /* Select one LWP out of those that have events pending. */
2711
2712 static void
2713 select_event_lwp (struct lwp_info **orig_lp)
2714 {
2715 struct thread_info *event_thread = NULL;
2716
2717 /* In all-stop, give preference to the LWP that is being
2718 single-stepped. There will be at most one, and it's the LWP that
2719 the core is most interested in. If we didn't do this, then we'd
2720 have to handle pending step SIGTRAPs somehow in case the core
2721 later continues the previously-stepped thread, otherwise we'd
2722 report the pending SIGTRAP, and the core, not having stepped the
2723 thread, wouldn't understand what the trap was for, and therefore
2724 would report it to the user as a random signal. */
2725 if (!non_stop)
2726 {
2727 event_thread = find_thread ([] (thread_info *thread)
2728 {
2729 lwp_info *lp = get_thread_lwp (thread);
2730
2731 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2732 && thread->last_resume_kind == resume_step
2733 && lp->status_pending_p);
2734 });
2735
2736 if (event_thread != NULL)
2737 threads_debug_printf
2738 ("Select single-step %s",
2739 target_pid_to_str (ptid_of (event_thread)).c_str ());
2740 }
2741 if (event_thread == NULL)
2742 {
2743 /* No single-stepping LWP. Select one at random, out of those
2744 which have had events. */
2745
2746 event_thread = find_thread_in_random ([&] (thread_info *thread)
2747 {
2748 lwp_info *lp = get_thread_lwp (thread);
2749
2750 /* Only resumed LWPs that have an event pending. */
2751 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2752 && lp->status_pending_p);
2753 });
2754 }
2755
2756 if (event_thread != NULL)
2757 {
2758 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2759
2760 /* Switch the event LWP. */
2761 *orig_lp = event_lp;
2762 }
2763 }
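
/* find_thread_in_random returns a uniformly random matching thread.
   One way to get a uniform pick in a single pass is reservoir
   sampling, sketched here (gdbserver's actual implementation may
   differ; interesting_p is a hypothetical predicate):

     int count = 0;
     thread_info *chosen = NULL;

     for_each_thread ([&] (thread_info *thread)
       {
	 if (!interesting_p (thread))
	   return;
	 if (rand () % ++count == 0)  // keep with probability 1/count
	   chosen = thread;
       });  */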
2764
2765 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2766    non-NULL.  */
2767
2768 static void
2769 unsuspend_all_lwps (struct lwp_info *except)
2770 {
2771 for_each_thread ([&] (thread_info *thread)
2772 {
2773 lwp_info *lwp = get_thread_lwp (thread);
2774
2775 if (lwp != except)
2776 lwp_suspended_decr (lwp);
2777 });
2778 }
2779
2780 static bool lwp_running (thread_info *thread);
2781
2782 /* Stabilize threads (move out of jump pads).
2783
2784    If a thread is midway through collecting a fast tracepoint, we need to
2785 finish the collection and move it out of the jump pad before
2786 reporting the signal.
2787
2788 This avoids recursion while collecting (when a signal arrives
2789 midway, and the signal handler itself collects), which would trash
2790 the trace buffer. In case the user set a breakpoint in a signal
2791    handler, this avoids the backtrace showing the jump pad, etc.
2792 Most importantly, there are certain things we can't do safely if
2793    threads are stopped in a jump pad (or in its callees).  For
2794 example:
2795
2796    - starting a new trace run.  A thread still collecting the
2797    previous run could trash the trace buffer when resumed.  The trace
2798    buffer control structures would have been reset but the thread had
2799    no way to tell.  The thread could even be midway through memcpy'ing
2800    to the buffer, which would mean that when resumed, it would clobber
2801    the trace buffer that had been set for a new run.
2802
2803 - we can't rewrite/reuse the jump pads for new tracepoints
2804    safely.  Say you do tstart while a thread is stopped midway
2805    through collecting.  When the thread is later resumed, it finishes the
2806 collection, and returns to the jump pad, to execute the original
2807 instruction that was under the tracepoint jump at the time the
2808    older run had been started.  If the jump pad had since been
2809    rewritten for something else in the new run, the thread would now
2810 execute the wrong / random instructions. */
2811
2812 void
2813 linux_process_target::stabilize_threads ()
2814 {
2815 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2816 {
2817 return stuck_in_jump_pad (thread);
2818 });
2819
2820 if (thread_stuck != NULL)
2821 {
2822 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2823 lwpid_of (thread_stuck));
2824 return;
2825 }
2826
2827 scoped_restore_current_thread restore_thread;
2828
2829 stabilizing_threads = 1;
2830
2831 /* Kick 'em all. */
2832 for_each_thread ([this] (thread_info *thread)
2833 {
2834 move_out_of_jump_pad (thread);
2835 });
2836
2837 /* Loop until all are stopped out of the jump pads. */
2838 while (find_thread (lwp_running) != NULL)
2839 {
2840 struct target_waitstatus ourstatus;
2841 struct lwp_info *lwp;
2842 int wstat;
2843
2844       /* Note that we go through the full wait event loop.  While
2845 	 moving threads out of the jump pad, we need to be able to step
2846 over internal breakpoints and such. */
2847 wait_1 (minus_one_ptid, &ourstatus, 0);
2848
2849 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2850 {
2851 lwp = get_thread_lwp (current_thread);
2852
2853 /* Lock it. */
2854 lwp_suspended_inc (lwp);
2855
2856 if (ourstatus.sig () != GDB_SIGNAL_0
2857 || current_thread->last_resume_kind == resume_stop)
2858 {
2859 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2860 enqueue_one_deferred_signal (lwp, &wstat);
2861 }
2862 }
2863 }
2864
2865 unsuspend_all_lwps (NULL);
2866
2867 stabilizing_threads = 0;
2868
2869 if (debug_threads)
2870 {
2871 thread_stuck = find_thread ([this] (thread_info *thread)
2872 {
2873 return stuck_in_jump_pad (thread);
2874 });
2875
2876 if (thread_stuck != NULL)
2877 threads_debug_printf
2878 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2879 lwpid_of (thread_stuck));
2880 }
2881 }
2882
2883 /* Convenience function that is called when the kernel reports an
2884 event that is not passed out to GDB. */
2885
2886 static ptid_t
2887 ignore_event (struct target_waitstatus *ourstatus)
2888 {
2889 /* If we got an event, there may still be others, as a single
2890 SIGCHLD can indicate more than one child stopped. This forces
2891 another target_wait call. */
2892 async_file_mark ();
2893
2894 ourstatus->set_ignore ();
2895 return null_ptid;
2896 }
2897
2898 ptid_t
2899 linux_process_target::filter_exit_event (lwp_info *event_child,
2900 target_waitstatus *ourstatus)
2901 {
2902 struct thread_info *thread = get_lwp_thread (event_child);
2903 ptid_t ptid = ptid_of (thread);
2904
2905 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2906 if a non-leader thread exits with a signal, we'd report it to the
2907 core which would interpret it as the whole-process exiting.
2908 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2909 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2910 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2911 return ptid;
2912
2913 if (!is_leader (thread))
2914 {
2915 if (report_exit_events_for (thread))
2916 ourstatus->set_thread_exited (0);
2917 else
2918 ourstatus->set_ignore ();
2919
2920 delete_lwp (event_child);
2921 }
2922 return ptid;
2923 }
2924
2925 /* Returns 1 if GDB is interested in any event_child syscalls. */
2926
2927 static int
2928 gdb_catching_syscalls_p (struct lwp_info *event_child)
2929 {
2930 struct thread_info *thread = get_lwp_thread (event_child);
2931 struct process_info *proc = get_thread_process (thread);
2932
2933 return !proc->syscalls_to_catch.empty ();
2934 }
2935
2936 bool
2937 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2938 {
2939 int sysno;
2940 struct thread_info *thread = get_lwp_thread (event_child);
2941 struct process_info *proc = get_thread_process (thread);
2942
2943 if (proc->syscalls_to_catch.empty ())
2944 return false;
2945
2946 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2947 return true;
2948
2949 get_syscall_trapinfo (event_child, &sysno);
2950
2951 for (int iter : proc->syscalls_to_catch)
2952 if (iter == sysno)
2953 return true;
2954
2955 return false;
2956 }
2957
2958 ptid_t
2959 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2960 target_wait_flags target_options)
2961 {
2962 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2963
2964 client_state &cs = get_client_state ();
2965 int w;
2966 struct lwp_info *event_child;
2967 int options;
2968 int pid;
2969 int step_over_finished;
2970 int bp_explains_trap;
2971 int maybe_internal_trap;
2972 int report_to_gdb;
2973 int trace_event;
2974 int in_step_range;
2975 int any_resumed;
2976
2977 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2978
2979 /* Translate generic target options into linux options. */
2980 options = __WALL;
2981 if (target_options & TARGET_WNOHANG)
2982 options |= WNOHANG;
2983
2984 bp_explains_trap = 0;
2985 trace_event = 0;
2986 in_step_range = 0;
2987 ourstatus->set_ignore ();
2988
2989 auto status_pending_p_any = [&] (thread_info *thread)
2990 {
2991 return status_pending_p_callback (thread, minus_one_ptid);
2992 };
2993
2994 auto not_stopped = [&] (thread_info *thread)
2995 {
2996 return not_stopped_callback (thread, minus_one_ptid);
2997 };
2998
2999 /* Find a resumed LWP, if any. */
3000 if (find_thread (status_pending_p_any) != NULL)
3001 any_resumed = 1;
3002 else if (find_thread (not_stopped) != NULL)
3003 any_resumed = 1;
3004 else
3005 any_resumed = 0;
3006
3007 if (step_over_bkpt == null_ptid)
3008 pid = wait_for_event (ptid, &w, options);
3009 else
3010 {
3011 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3012 target_pid_to_str (step_over_bkpt).c_str ());
3013 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3014 }
3015
3016 if (pid == 0 || (pid == -1 && !any_resumed))
3017 {
3018 gdb_assert (target_options & TARGET_WNOHANG);
3019
3020 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3021
3022 ourstatus->set_ignore ();
3023 return null_ptid;
3024 }
3025 else if (pid == -1)
3026 {
3027 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3028
3029 ourstatus->set_no_resumed ();
3030 return null_ptid;
3031 }
3032
3033 event_child = get_thread_lwp (current_thread);
3034
3035 /* wait_for_event only returns an exit status for the last
3036 child of a process. Report it. */
3037 if (WIFEXITED (w) || WIFSIGNALED (w))
3038 {
3039 if (WIFEXITED (w))
3040 {
3041 ourstatus->set_exited (WEXITSTATUS (w));
3042
3043 threads_debug_printf
3044 ("ret = %s, exited with retcode %d",
3045 target_pid_to_str (ptid_of (current_thread)).c_str (),
3046 WEXITSTATUS (w));
3047 }
3048 else
3049 {
3050 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3051
3052 threads_debug_printf
3053 ("ret = %s, terminated with signal %d",
3054 target_pid_to_str (ptid_of (current_thread)).c_str (),
3055 WTERMSIG (w));
3056 }
3057
3058 return filter_exit_event (event_child, ourstatus);
3059 }
3060
3061   /* If a step-over executes a breakpoint instruction, then in the case
3062      of a hardware single step it means a gdb/gdbserver breakpoint had
3063      been planted on top of a permanent breakpoint; in the case of a
3064      software single step it may just mean that gdbserver hit the
3065      reinsert breakpoint.  The PC has been adjusted by save_stop_reason
3066      to point at the breakpoint address.
3067      So, in the case of a hardware single step, advance the PC manually
3068      past the breakpoint; in the case of a software single step, advance
3069      only if it's not the single_step_breakpoint we are hitting.
3070      This prevents the program from trapping the permanent breakpoint
3071      forever.  */
3072 if (step_over_bkpt != null_ptid
3073 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3074 && (event_child->stepping
3075 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3076 {
3077 int increment_pc = 0;
3078 int breakpoint_kind = 0;
3079 CORE_ADDR stop_pc = event_child->stop_pc;
3080
3081 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3082 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3083
3084 threads_debug_printf
3085 ("step-over for %s executed software breakpoint",
3086 target_pid_to_str (ptid_of (current_thread)).c_str ());
3087
3088 if (increment_pc != 0)
3089 {
3090 struct regcache *regcache
3091 = get_thread_regcache (current_thread, 1);
3092
3093 event_child->stop_pc += increment_pc;
3094 low_set_pc (regcache, event_child->stop_pc);
3095
3096 if (!low_breakpoint_at (event_child->stop_pc))
3097 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3098 }
3099 }
3100
3101 /* If this event was not handled before, and is not a SIGTRAP, we
3102 report it. SIGILL and SIGSEGV are also treated as traps in case
3103 a breakpoint is inserted at the current PC. If this target does
3104 not support internal breakpoints at all, we also report the
3105 SIGTRAP without further processing; it's of no concern to us. */
3106 maybe_internal_trap
3107 = (low_supports_breakpoints ()
3108 && (WSTOPSIG (w) == SIGTRAP
3109 || ((WSTOPSIG (w) == SIGILL
3110 || WSTOPSIG (w) == SIGSEGV)
3111 && low_breakpoint_at (event_child->stop_pc))));
3112
3113 if (maybe_internal_trap)
3114 {
3115 /* Handle anything that requires bookkeeping before deciding to
3116 report the event or continue waiting. */
3117
3118 /* First check if we can explain the SIGTRAP with an internal
3119 breakpoint, or if we should possibly report the event to GDB.
3120 Do this before anything that may remove or insert a
3121 breakpoint. */
3122 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3123
3124 /* We have a SIGTRAP, possibly a step-over dance has just
3125 finished. If so, tweak the state machine accordingly,
3126 reinsert breakpoints and delete any single-step
3127 breakpoints. */
3128 step_over_finished = finish_step_over (event_child);
3129
3130 /* Now invoke the callbacks of any internal breakpoints there. */
3131 check_breakpoints (event_child->stop_pc);
3132
3133 /* Handle tracepoint data collecting. This may overflow the
3134 trace buffer, and cause a tracing stop, removing
3135 breakpoints. */
3136 trace_event = handle_tracepoints (event_child);
3137
3138 if (bp_explains_trap)
3139 threads_debug_printf ("Hit a gdbserver breakpoint.");
3140 }
3141 else
3142 {
3143 /* We have some other signal, possibly a step-over dance was in
3144 progress, and it should be cancelled too. */
3145 step_over_finished = finish_step_over (event_child);
3146 }
3147
3148 /* We have all the data we need. Either report the event to GDB, or
3149 resume threads and keep waiting for more. */
3150
3151 /* If we're collecting a fast tracepoint, finish the collection and
3152 move out of the jump pad before delivering a signal. See
3153 linux_stabilize_threads. */
3154
3155 if (WIFSTOPPED (w)
3156 && WSTOPSIG (w) != SIGTRAP
3157 && supports_fast_tracepoints ()
3158 && agent_loaded_p ())
3159 {
3160 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3161 "to defer or adjust it.",
3162 WSTOPSIG (w), lwpid_of (current_thread));
3163
3164 /* Allow debugging the jump pad itself. */
3165 if (current_thread->last_resume_kind != resume_step
3166 && maybe_move_out_of_jump_pad (event_child, &w))
3167 {
3168 enqueue_one_deferred_signal (event_child, &w);
3169
3170 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3171 WSTOPSIG (w), lwpid_of (current_thread));
3172
3173 resume_one_lwp (event_child, 0, 0, NULL);
3174
3175 return ignore_event (ourstatus);
3176 }
3177 }
3178
3179 if (event_child->collecting_fast_tracepoint
3180 != fast_tpoint_collect_result::not_collecting)
3181 {
3182 threads_debug_printf
3183 ("LWP %ld was trying to move out of the jump pad (%d). "
3184 "Check if we're already there.",
3185 lwpid_of (current_thread),
3186 (int) event_child->collecting_fast_tracepoint);
3187
3188 trace_event = 1;
3189
3190 event_child->collecting_fast_tracepoint
3191 = linux_fast_tracepoint_collecting (event_child, NULL);
3192
3193 if (event_child->collecting_fast_tracepoint
3194 != fast_tpoint_collect_result::before_insn)
3195 {
3196 /* No longer need this breakpoint. */
3197 if (event_child->exit_jump_pad_bkpt != NULL)
3198 {
3199 threads_debug_printf
3200 	      ("No longer need exit-jump-pad bkpt; removing it. "
3201 "stopping all threads momentarily.");
3202
3203 /* Other running threads could hit this breakpoint.
3204 We don't handle moribund locations like GDB does,
3205 instead we always pause all threads when removing
3206 breakpoints, so that any step-over or
3207 decr_pc_after_break adjustment is always taken
3208 care of while the breakpoint is still
3209 inserted. */
3210 stop_all_lwps (1, event_child);
3211
3212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3213 event_child->exit_jump_pad_bkpt = NULL;
3214
3215 unstop_all_lwps (1, event_child);
3216
3217 gdb_assert (event_child->suspended >= 0);
3218 }
3219 }
3220
3221 if (event_child->collecting_fast_tracepoint
3222 == fast_tpoint_collect_result::not_collecting)
3223 {
3224 threads_debug_printf
3225 ("fast tracepoint finished collecting successfully.");
3226
3227 /* We may have a deferred signal to report. */
3228 if (dequeue_one_deferred_signal (event_child, &w))
3229 threads_debug_printf ("dequeued one signal.");
3230 else
3231 {
3232 threads_debug_printf ("no deferred signals.");
3233
3234 if (stabilizing_threads)
3235 {
3236 ourstatus->set_stopped (GDB_SIGNAL_0);
3237
3238 threads_debug_printf
3239 ("ret = %s, stopped while stabilizing threads",
3240 target_pid_to_str (ptid_of (current_thread)).c_str ());
3241
3242 return ptid_of (current_thread);
3243 }
3244 }
3245 }
3246 }
3247
3248 /* Check whether GDB would be interested in this event. */
3249
3250 /* Check if GDB is interested in this syscall. */
3251 if (WIFSTOPPED (w)
3252 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3253 && !gdb_catch_this_syscall (event_child))
3254 {
3255 threads_debug_printf ("Ignored syscall for LWP %ld.",
3256 lwpid_of (current_thread));
3257
3258 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3259
3260 return ignore_event (ourstatus);
3261 }
3262
3263 /* If GDB is not interested in this signal, don't stop other
3264 threads, and don't report it to GDB. Just resume the inferior
3265 right away. We do this for threading-related signals as well as
3266 any that GDB specifically requested we ignore. But never ignore
3267 SIGSTOP if we sent it ourselves, and do not ignore signals when
3268 stepping - they may require special handling to skip the signal
3269 handler. Also never ignore signals that could be caused by a
3270 breakpoint. */
3271 if (WIFSTOPPED (w)
3272 && current_thread->last_resume_kind != resume_step
3273 && (
3274 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3275 (current_process ()->priv->thread_db != NULL
3276 && (WSTOPSIG (w) == __SIGRTMIN
3277 || WSTOPSIG (w) == __SIGRTMIN + 1))
3278 ||
3279 #endif
3280 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3281 && !(WSTOPSIG (w) == SIGSTOP
3282 && current_thread->last_resume_kind == resume_stop)
3283 && !linux_wstatus_maybe_breakpoint (w))))
3284 {
3285 siginfo_t info, *info_p;
3286
3287 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3288 WSTOPSIG (w), lwpid_of (current_thread));
3289
3290 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3291 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3292 info_p = &info;
3293 else
3294 info_p = NULL;
3295
3296 if (step_over_finished)
3297 {
3298 /* We cancelled this thread's step-over above. We still
3299 need to unsuspend all other LWPs, and set them back
3300 running again while the signal handler runs. */
3301 unsuspend_all_lwps (event_child);
3302
3303 /* Enqueue the pending signal info so that proceed_all_lwps
3304 doesn't lose it. */
3305 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3306
3307 proceed_all_lwps ();
3308 }
3309 else
3310 {
3311 resume_one_lwp (event_child, event_child->stepping,
3312 WSTOPSIG (w), info_p);
3313 }
3314
3315 return ignore_event (ourstatus);
3316 }
3317
3318 /* Note that all addresses are always "out of the step range" when
3319 there's no range to begin with. */
3320 in_step_range = lwp_in_step_range (event_child);
3321
3322 /* If GDB wanted this thread to single step, and the thread is out
3323 of the step range, we always want to report the SIGTRAP, and let
3324 GDB handle it. Watchpoints should always be reported. So should
3325 signals we can't explain. A SIGTRAP we can't explain could be a
3326      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3327      we do, we'd be able to handle GDB breakpoints on top of internal
3328 breakpoints, by handling the internal breakpoint and still
3329 reporting the event to GDB. If we don't, we're out of luck, GDB
3330 won't see the breakpoint hit. If we see a single-step event but
3331 the thread should be continuing, don't pass the trap to gdb.
3332 That indicates that we had previously finished a single-step but
3333 left the single-step pending -- see
3334 complete_ongoing_step_over. */
3335 report_to_gdb = (!maybe_internal_trap
3336 || (current_thread->last_resume_kind == resume_step
3337 && !in_step_range)
3338 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3339 || (!in_step_range
3340 && !bp_explains_trap
3341 && !trace_event
3342 && !step_over_finished
3343 && !(current_thread->last_resume_kind == resume_continue
3344 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3345 || (gdb_breakpoint_here (event_child->stop_pc)
3346 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3347 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3348 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3349
3350 run_breakpoint_commands (event_child->stop_pc);
3351
3352 /* We found no reason GDB would want us to stop. We either hit one
3353 of our own breakpoints, or finished an internal step GDB
3354 shouldn't know about. */
3355 if (!report_to_gdb)
3356 {
3357 if (bp_explains_trap)
3358 threads_debug_printf ("Hit a gdbserver breakpoint.");
3359
3360 if (step_over_finished)
3361 threads_debug_printf ("Step-over finished.");
3362
3363 if (trace_event)
3364 threads_debug_printf ("Tracepoint event.");
3365
3366 if (lwp_in_step_range (event_child))
3367 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3368 paddress (event_child->stop_pc),
3369 paddress (event_child->step_range_start),
3370 paddress (event_child->step_range_end));
3371
3372 /* We're not reporting this breakpoint to GDB, so apply the
3373 decr_pc_after_break adjustment to the inferior's regcache
3374 ourselves. */
3375
3376 if (low_supports_breakpoints ())
3377 {
3378 struct regcache *regcache
3379 = get_thread_regcache (current_thread, 1);
3380 low_set_pc (regcache, event_child->stop_pc);
3381 }
3382
3383 if (step_over_finished)
3384 {
3385 /* If we have finished stepping over a breakpoint, we've
3386 stopped and suspended all LWPs momentarily except the
3387 stepping one. This is where we resume them all again.
3388 We're going to keep waiting, so use proceed, which
3389 handles stepping over the next breakpoint. */
3390 unsuspend_all_lwps (event_child);
3391 }
3392 else
3393 {
3394 /* Remove the single-step breakpoints if any. Note that
3395 	     there are no single-step breakpoints if we finished stepping
3396 over. */
3397 if (supports_software_single_step ()
3398 && has_single_step_breakpoints (current_thread))
3399 {
3400 stop_all_lwps (0, event_child);
3401 delete_single_step_breakpoints (current_thread);
3402 unstop_all_lwps (0, event_child);
3403 }
3404 }
3405
3406 threads_debug_printf ("proceeding all threads.");
3407
3408 proceed_all_lwps ();
3409
3410 return ignore_event (ourstatus);
3411 }
3412
3413 if (debug_threads)
3414 {
3415 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3416 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3417 lwpid_of (get_lwp_thread (event_child)),
3418 event_child->waitstatus.to_string ().c_str ());
3419
3420 if (current_thread->last_resume_kind == resume_step)
3421 {
3422 if (event_child->step_range_start == event_child->step_range_end)
3423 threads_debug_printf
3424 ("GDB wanted to single-step, reporting event.");
3425 else if (!lwp_in_step_range (event_child))
3426 threads_debug_printf ("Out of step range, reporting event.");
3427 }
3428
3429 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3430 threads_debug_printf ("Stopped by watchpoint.");
3431 else if (gdb_breakpoint_here (event_child->stop_pc))
3432 threads_debug_printf ("Stopped by GDB breakpoint.");
3433 }
3434
3435 threads_debug_printf ("Hit a non-gdbserver trap event.");
3436
3437 /* Alright, we're going to report a stop. */
3438
3439 /* Remove single-step breakpoints. */
3440 if (supports_software_single_step ())
3441 {
3442       /* Whether to remove the single-step breakpoints.  If true, stop
3443 	 all LWPs, so that other threads won't hit the breakpoint in
3444 	 stale memory.  */
3445 int remove_single_step_breakpoints_p = 0;
3446
3447 if (non_stop)
3448 {
3449 remove_single_step_breakpoints_p
3450 = has_single_step_breakpoints (current_thread);
3451 }
3452 else
3453 {
3454 /* In all-stop, a stop reply cancels all previous resume
3455 requests. Delete all single-step breakpoints. */
3456
3457 find_thread ([&] (thread_info *thread) {
3458 if (has_single_step_breakpoints (thread))
3459 {
3460 remove_single_step_breakpoints_p = 1;
3461 return true;
3462 }
3463
3464 return false;
3465 });
3466 }
3467
3468 if (remove_single_step_breakpoints_p)
3469 {
3470 /* If we remove single-step breakpoints from memory, stop all lwps,
3471 	     so that other threads won't hit the breakpoint in stale
3472 	     memory.  */
3473 stop_all_lwps (0, event_child);
3474
3475 if (non_stop)
3476 {
3477 gdb_assert (has_single_step_breakpoints (current_thread));
3478 delete_single_step_breakpoints (current_thread);
3479 }
3480 else
3481 {
3482 for_each_thread ([] (thread_info *thread){
3483 if (has_single_step_breakpoints (thread))
3484 delete_single_step_breakpoints (thread);
3485 });
3486 }
3487
3488 unstop_all_lwps (0, event_child);
3489 }
3490 }
3491
3492 if (!stabilizing_threads)
3493 {
3494 /* In all-stop, stop all threads. */
3495 if (!non_stop)
3496 stop_all_lwps (0, NULL);
3497
3498 if (step_over_finished)
3499 {
3500 if (!non_stop)
3501 {
3502 /* If we were doing a step-over, all other threads but
3503 the stepping one had been paused in start_step_over,
3504 with their suspend counts incremented. We don't want
3505 to do a full unstop/unpause, because we're in
3506 all-stop mode (so we want threads stopped), but we
3507 still need to unsuspend the other threads, to
3508 decrement their `suspended' count back. */
3509 unsuspend_all_lwps (event_child);
3510 }
3511 else
3512 {
3513 /* If we just finished a step-over, then all threads had
3514 been momentarily paused. In all-stop, that's fine,
3515 we want threads stopped by now anyway. In non-stop,
3516 we need to re-resume threads that GDB wanted to be
3517 running. */
3518 unstop_all_lwps (1, event_child);
3519 }
3520 }
3521
3522 /* If we're not waiting for a specific LWP, choose an event LWP
3523 from among those that have had events. Giving equal priority
3524 to all LWPs that have had events helps prevent
3525 starvation. */
3526 if (ptid == minus_one_ptid)
3527 {
3528 event_child->status_pending_p = 1;
3529 event_child->status_pending = w;
3530
3531 select_event_lwp (&event_child);
3532
3533 /* current_thread and event_child must stay in sync. */
3534 switch_to_thread (get_lwp_thread (event_child));
3535
3536 event_child->status_pending_p = 0;
3537 w = event_child->status_pending;
3538 }
3539
3540
3541 /* Stabilize threads (move out of jump pads). */
3542 if (!non_stop)
3543 target_stabilize_threads ();
3544 }
3545 else
3546 {
3547 /* If we just finished a step-over, then all threads had been
3548 momentarily paused. In all-stop, that's fine, we want
3549 threads stopped by now anyway. In non-stop, we need to
3550 re-resume threads that GDB wanted to be running. */
3551 if (step_over_finished)
3552 unstop_all_lwps (1, event_child);
3553 }
3554
3555 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3556 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3557
3558 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3559 {
3560 /* If the reported event is an exit, fork, vfork, clone or exec,
3561 let GDB know. */
3562
3563 /* Break the unreported fork/vfork/clone relationship chain. */
3564 if (is_new_child_status (event_child->waitstatus.kind ()))
3565 {
3566 event_child->relative->relative = NULL;
3567 event_child->relative = NULL;
3568 }
3569
3570 *ourstatus = event_child->waitstatus;
3571 /* Clear the event lwp's waitstatus since we handled it already. */
3572 event_child->waitstatus.set_ignore ();
3573 }
3574 else
3575 {
3576 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3577 event_child->waitstatus wasn't filled in with the details, so look at
3578 the wait status W. */
3579 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3580 {
3581 int syscall_number;
3582
3583 get_syscall_trapinfo (event_child, &syscall_number);
3584 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3585 ourstatus->set_syscall_entry (syscall_number);
3586 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3587 ourstatus->set_syscall_return (syscall_number);
3588 else
3589 gdb_assert_not_reached ("unexpected syscall state");
3590 }
3591 else if (current_thread->last_resume_kind == resume_stop
3592 && WSTOPSIG (w) == SIGSTOP)
3593 {
3594 	  /* A thread that has been requested to stop by GDB with vCont;t
3595 	     stopped cleanly, so report it as SIG0.  The use of
3596 SIGSTOP is an implementation detail. */
3597 ourstatus->set_stopped (GDB_SIGNAL_0);
3598 }
3599 else
3600 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3601 }
3602
3603 /* Now that we've selected our final event LWP, un-adjust its PC if
3604 it was a software breakpoint, and the client doesn't know we can
3605 adjust the breakpoint ourselves. */
3606 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3607 && !cs.swbreak_feature)
3608 {
3609 int decr_pc = low_decr_pc_after_break ();
3610
3611 if (decr_pc != 0)
3612 {
3613 struct regcache *regcache
3614 = get_thread_regcache (current_thread, 1);
3615 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3616 }
3617 }
3618
3619 gdb_assert (step_over_bkpt == null_ptid);
3620
3621 threads_debug_printf ("ret = %s, %s",
3622 target_pid_to_str (ptid_of (current_thread)).c_str (),
3623 ourstatus->to_string ().c_str ());
3624
3625 return filter_exit_event (event_child, ourstatus);
3626 }
3627
3628 /* Get rid of any pending event in the pipe. */
3629 static void
3630 async_file_flush (void)
3631 {
3632 linux_event_pipe.flush ();
3633 }
3634
3635 /* Put something in the pipe, so the event loop wakes up. */
3636 static void
3637 async_file_mark (void)
3638 {
3639 linux_event_pipe.mark ();
3640 }
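
/* linux_event_pipe is an instance of the usual self-pipe trick:
   mark () writes a byte so a select/poll on the read end wakes the
   event loop, and flush () drains the pipe so it doesn't stay
   readable forever.  A minimal sketch of the same idea with a raw
   nonblocking pipe (assumed; not gdbsupport's actual implementation):

     int fds[2];

     void init_pipe (void)
     {
       pipe (fds);                        // fds[0] read, fds[1] write
       fcntl (fds[0], F_SETFL, O_NONBLOCK);
     }
     void mark (void) { char c = '+'; write (fds[1], &c, 1); }
     void flush (void)
     {
       char buf[64];
       while (read (fds[0], buf, sizeof (buf)) > 0)
	 ;                                // drain until empty
     }  */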
3641
3642 ptid_t
3643 linux_process_target::wait (ptid_t ptid,
3644 target_waitstatus *ourstatus,
3645 target_wait_flags target_options)
3646 {
3647 ptid_t event_ptid;
3648
3649 /* Flush the async file first. */
3650 if (target_is_async_p ())
3651 async_file_flush ();
3652
3653 do
3654 {
3655 event_ptid = wait_1 (ptid, ourstatus, target_options);
3656 }
3657 while ((target_options & TARGET_WNOHANG) == 0
3658 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3659
3660 /* If at least one stop was reported, there may be more. A single
3661 SIGCHLD can signal more than one child stop. */
3662 if (target_is_async_p ()
3663 && (target_options & TARGET_WNOHANG) != 0
3664 && event_ptid != null_ptid)
3665 async_file_mark ();
3666
3667 return event_ptid;
3668 }
3669
3670 /* Send a signal to an LWP. */
3671
3672 static int
3673 kill_lwp (unsigned long lwpid, int signo)
3674 {
3675 int ret;
3676
3677 errno = 0;
3678 ret = syscall (__NR_tkill, lwpid, signo);
3679 if (errno == ENOSYS)
3680 {
3681 /* If tkill fails, then we are not using nptl threads, a
3682 configuration we no longer support. */
3683 perror_with_name (("tkill"));
3684 }
3685 return ret;
3686 }
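
/* For reference: kill (PID, SIG) addresses the whole thread group
   (any thread may dequeue the signal), while tkill directs the
   signal at one specific LWP, which is what per-thread stopping
   needs:

     syscall (__NR_tkill, lwpid, SIGSTOP);           <- as above
     syscall (__NR_tgkill, tgid, lwpid, SIGSTOP);    <- newer variant

   tgkill additionally checks the thread-group id, guarding against
   LWP-id reuse; it is what glibc's pthread_kill uses, tkill being
   its older predecessor.  */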
3687
3688 void
3689 linux_stop_lwp (struct lwp_info *lwp)
3690 {
3691 send_sigstop (lwp);
3692 }
3693
3694 static void
3695 send_sigstop (struct lwp_info *lwp)
3696 {
3697 int pid;
3698
3699 pid = lwpid_of (get_lwp_thread (lwp));
3700
3701 /* If we already have a pending stop signal for this process, don't
3702 send another. */
3703 if (lwp->stop_expected)
3704 {
3705 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3706
3707 return;
3708 }
3709
3710 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3711
3712 lwp->stop_expected = 1;
3713 kill_lwp (pid, SIGSTOP);
3714 }
3715
3716 static void
3717 send_sigstop (thread_info *thread, lwp_info *except)
3718 {
3719 struct lwp_info *lwp = get_thread_lwp (thread);
3720
3721 /* Ignore EXCEPT. */
3722 if (lwp == except)
3723 return;
3724
3725 if (lwp->stopped)
3726 return;
3727
3728 send_sigstop (lwp);
3729 }
3730
3731 /* Increment the suspend count of an LWP, and stop it, if not stopped
3732 yet. */
3733 static void
3734 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3735 {
3736 struct lwp_info *lwp = get_thread_lwp (thread);
3737
3738 /* Ignore EXCEPT. */
3739 if (lwp == except)
3740 return;
3741
3742 lwp_suspended_inc (lwp);
3743
3744 send_sigstop (thread, except);
3745 }
3746
3747 static void
3748 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3749 {
3750 /* Store the exit status for later. */
3751 lwp->status_pending_p = 1;
3752 lwp->status_pending = wstat;
3753
3754 /* Store in waitstatus as well, as there's nothing else to process
3755 for this event. */
3756 if (WIFEXITED (wstat))
3757 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3758 else if (WIFSIGNALED (wstat))
3759 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3760
3761 /* Prevent trying to stop it. */
3762 lwp->stopped = 1;
3763
3764 /* No further stops are expected from a dead lwp. */
3765 lwp->stop_expected = 0;
3766 }
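
/* Illustration of the wait-status decoding above (see wait(2)):

     exit (3)            -> WIFEXITED (w),   WEXITSTATUS (w) == 3
     killed by SIGKILL   -> WIFSIGNALED (w), WTERMSIG (w) == SIGKILL
     stopped by SIGSTOP  -> WIFSTOPPED (w),  WSTOPSIG (w) == SIGSTOP

   Only the first two forms mark the LWP dead; cf. lwp_is_marked_dead
   just below.  */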
3767
3768 /* Return true if LWP has exited already, and has a pending exit event
3769 to report to GDB. */
3770
3771 static int
3772 lwp_is_marked_dead (struct lwp_info *lwp)
3773 {
3774 return (lwp->status_pending_p
3775 && (WIFEXITED (lwp->status_pending)
3776 || WIFSIGNALED (lwp->status_pending)));
3777 }
3778
3779 void
3780 linux_process_target::wait_for_sigstop ()
3781 {
3782 struct thread_info *saved_thread;
3783 ptid_t saved_tid;
3784 int wstat;
3785 int ret;
3786
3787 saved_thread = current_thread;
3788 if (saved_thread != NULL)
3789 saved_tid = saved_thread->id;
3790 else
3791 saved_tid = null_ptid; /* avoid bogus unused warning */
3792
3793 scoped_restore_current_thread restore_thread;
3794
3795 threads_debug_printf ("pulling events");
3796
3797 /* Passing NULL_PTID as filter indicates we want all events to be
3798 left pending. Eventually this returns when there are no
3799 unwaited-for children left. */
3800 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3801 gdb_assert (ret == -1);
3802
3803 if (saved_thread == NULL || mythread_alive (saved_tid))
3804 return;
3805 else
3806 {
3807 threads_debug_printf ("Previously current thread died.");
3808
3809 /* We can't change the current inferior behind GDB's back;
3810 otherwise, a subsequent command may apply to the wrong
3811 process. */
3812 restore_thread.dont_restore ();
3813 switch_to_thread (nullptr);
3814 }
3815 }
3816
3817 bool
3818 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3819 {
3820 struct lwp_info *lwp = get_thread_lwp (thread);
3821
3822 if (lwp->suspended != 0)
3823 {
3824 internal_error ("LWP %ld is suspended, suspended=%d\n",
3825 lwpid_of (thread), lwp->suspended);
3826 }
3827 gdb_assert (lwp->stopped);
3828
3829 /* Allow debugging the jump pad, gdb_collect, etc. */
3830 return (supports_fast_tracepoints ()
3831 && agent_loaded_p ()
3832 && (gdb_breakpoint_here (lwp->stop_pc)
3833 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3834 || thread->last_resume_kind == resume_step)
3835 && (linux_fast_tracepoint_collecting (lwp, NULL)
3836 != fast_tpoint_collect_result::not_collecting));
3837 }
3838
3839 void
3840 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3841 {
3842 struct lwp_info *lwp = get_thread_lwp (thread);
3843 int *wstat;
3844
3845 if (lwp->suspended != 0)
3846 {
3847 internal_error ("LWP %ld is suspended, suspended=%d\n",
3848 lwpid_of (thread), lwp->suspended);
3849 }
3850 gdb_assert (lwp->stopped);
3851
3852 /* For gdb_breakpoint_here. */
3853 scoped_restore_current_thread restore_thread;
3854 switch_to_thread (thread);
3855
3856 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3857
3858 /* Allow debugging the jump pad, gdb_collect, etc. */
3859 if (!gdb_breakpoint_here (lwp->stop_pc)
3860 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3861 && thread->last_resume_kind != resume_step
3862 && maybe_move_out_of_jump_pad (lwp, wstat))
3863 {
3864 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3865 lwpid_of (thread));
3866
3867 if (wstat)
3868 {
3869 lwp->status_pending_p = 0;
3870 enqueue_one_deferred_signal (lwp, wstat);
3871
3872 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3873 WSTOPSIG (*wstat), lwpid_of (thread));
3874 }
3875
3876 resume_one_lwp (lwp, 0, 0, NULL);
3877 }
3878 else
3879 lwp_suspended_inc (lwp);
3880 }
3881
3882 static bool
3883 lwp_running (thread_info *thread)
3884 {
3885 struct lwp_info *lwp = get_thread_lwp (thread);
3886
3887 if (lwp_is_marked_dead (lwp))
3888 return false;
3889
3890 return !lwp->stopped;
3891 }
3892
3893 void
3894 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3895 {
3896 /* Should not be called recursively. */
3897 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3898
3899 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3900
3901 threads_debug_printf
3902 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3903 (except != NULL
3904 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3905 : "none"));
3906
3907 stopping_threads = (suspend
3908 ? STOPPING_AND_SUSPENDING_THREADS
3909 : STOPPING_THREADS);
3910
3911 if (suspend)
3912 for_each_thread ([&] (thread_info *thread)
3913 {
3914 suspend_and_send_sigstop (thread, except);
3915 });
3916 else
3917 for_each_thread ([&] (thread_info *thread)
3918 {
3919 send_sigstop (thread, except);
3920 });
3921
3922 wait_for_sigstop ();
3923 stopping_threads = NOT_STOPPING_THREADS;
3924
3925 threads_debug_printf ("setting stopping_threads back to !stopping");
3926 }
3927
3928 /* Enqueue one signal in the chain of signals which need to be
3929 delivered to this process on next resume. */
3930
3931 static void
3932 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3933 {
3934 lwp->pending_signals.emplace_back (signal);
3935 if (info == nullptr)
3936 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3937 else
3938 lwp->pending_signals.back ().info = *info;
3939 }
3940
3941 void
3942 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3943 {
3944 struct thread_info *thread = get_lwp_thread (lwp);
3945 struct regcache *regcache = get_thread_regcache (thread, 1);
3946
3947 scoped_restore_current_thread restore_thread;
3948
3949 switch_to_thread (thread);
3950 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3951
3952 for (CORE_ADDR pc : next_pcs)
3953 set_single_step_breakpoint (pc, current_ptid);
3954 }
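
/* Worked example (addresses purely illustrative): for a conditional
   branch on a software single-step target, low_get_next_pcs
   typically reports both possible successors, e.g.:

     0x1000: beq 0x1040      -> next_pcs = { 0x1004, 0x1040 }

   and a single-step breakpoint is planted at each address, so the
   thread traps wherever execution actually goes.  */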
3955
3956 int
3957 linux_process_target::single_step (lwp_info* lwp)
3958 {
3959 int step = 0;
3960
3961 if (supports_hardware_single_step ())
3962 {
3963 step = 1;
3964 }
3965 else if (supports_software_single_step ())
3966 {
3967 install_software_single_step_breakpoints (lwp);
3968 step = 0;
3969 }
3970 else
3971 threads_debug_printf ("stepping is not implemented on this target");
3972
3973 return step;
3974 }
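
/* The return value feeds resume_one_lwp_throw below: 1 selects
   PTRACE_SINGLESTEP (a hardware step), while 0 resumes with
   PTRACE_CONT (or PTRACE_SYSCALL) and relies on the single-step
   breakpoints just installed to stop the thread.  */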
3975
3976 /* The signal can be delivered to the inferior if we are not trying to
3977 finish a fast tracepoint collect. Since a signal can be delivered
3978 during the step-over, the program may enter the signal handler and
3979 trap again after returning from it. We can live with such spurious
3980 double traps. */
3981
3982 static int
3983 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3984 {
3985 return (lwp->collecting_fast_tracepoint
3986 == fast_tpoint_collect_result::not_collecting);
3987 }
3988
3989 void
3990 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3991 int signal, siginfo_t *info)
3992 {
3993 struct thread_info *thread = get_lwp_thread (lwp);
3994 int ptrace_request;
3995 struct process_info *proc = get_thread_process (thread);
3996
3997 /* Note that the target description may not be initialised
3998 (proc->tdesc == NULL) at this point because the program hasn't
3999 stopped at the first instruction yet. It means GDBserver skips
4000 the extra traps from the wrapper program (see option --wrapper).
4001 Code in this function that requires register access should be
4002 guarded by a check that proc->tdesc is not NULL, or similar. */
4003
4004 if (lwp->stopped == 0)
4005 return;
4006
4007 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
4008
4009 fast_tpoint_collect_result fast_tp_collecting
4010 = lwp->collecting_fast_tracepoint;
4011
4012 gdb_assert (!stabilizing_threads
4013 || (fast_tp_collecting
4014 != fast_tpoint_collect_result::not_collecting));
4015
4016 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4017 user used the "jump" command, or "set $pc = foo"). */
4018 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4019 {
4020 /* Collecting 'while-stepping' actions doesn't make sense
4021 anymore. */
4022 release_while_stepping_state_list (thread);
4023 }
4024
4025 /* If we have pending signals or status, and a new signal, enqueue the
4026 signal. Also enqueue the signal if it can't be delivered to the
4027 inferior right now. */
4028 if (signal != 0
4029 && (lwp->status_pending_p
4030 || !lwp->pending_signals.empty ()
4031 || !lwp_signal_can_be_delivered (lwp)))
4032 {
4033 enqueue_pending_signal (lwp, signal, info);
4034
4035 /* Postpone any pending signal. It was enqueued above. */
4036 signal = 0;
4037 }
4038
4039 if (lwp->status_pending_p)
4040 {
4041 threads_debug_printf
4042 ("Not resuming lwp %ld (%s, stop %s); has pending status",
4043 lwpid_of (thread), step ? "step" : "continue",
4044 lwp->stop_expected ? "expected" : "not expected");
4045 return;
4046 }
4047
4048 scoped_restore_current_thread restore_thread;
4049 switch_to_thread (thread);
4050
4051 /* This bit needs some thinking about. If we get a signal that
4052 we must report while a single-step reinsert is still pending,
4053 we often end up resuming the thread. It might be better to
4054 (ew) allow a stack of pending events; then we could be sure that
4055 the reinsert happened right away and not lose any signals.
4056
4057 Making this stack would also shrink the window in which breakpoints are
4058 uninserted (see comment in linux_wait_for_lwp) but not enough for
4059 complete correctness, so it won't solve that problem. It may be
4060 worthwhile just to solve this one, however. */
4061 if (lwp->bp_reinsert != 0)
4062 {
4063 threads_debug_printf (" pending reinsert at 0x%s",
4064 paddress (lwp->bp_reinsert));
4065
4066 if (supports_hardware_single_step ())
4067 {
4068 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4069 {
4070 if (step == 0)
4071 warning ("BAD - reinserting but not stepping.");
4072 if (lwp->suspended)
4073 warning ("BAD - reinserting and suspended(%d).",
4074 lwp->suspended);
4075 }
4076 }
4077
4078 step = maybe_hw_step (thread);
4079 }
4080
4081 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4082 threads_debug_printf
4083 ("lwp %ld wants to get out of fast tracepoint jump pad "
4084 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4085
4086 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4087 {
4088 threads_debug_printf
4089 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4090 lwpid_of (thread));
4091
4092 if (supports_hardware_single_step ())
4093 step = 1;
4094 else
4095 {
4096 internal_error ("moving out of jump pad single-stepping"
4097 " not implemented on this target");
4098 }
4099 }
4100
4101 /* If we have while-stepping actions in this thread, set it stepping.
4102 If we have a signal to deliver, it may or may not be set to
4103 SIG_IGN; we don't know. Assume so, and allow collecting
4104 while-stepping into a signal handler. A possible smart thing to
4105 do would be to set an internal breakpoint at the signal return
4106 address, continue, and carry on catching this while-stepping
4107 action only when that breakpoint is hit. A future
4108 enhancement. */
4109 if (thread->while_stepping != NULL)
4110 {
4111 threads_debug_printf
4112 ("lwp %ld has a while-stepping action -> forcing step.",
4113 lwpid_of (thread));
4114
4115 step = single_step (lwp);
4116 }
4117
4118 if (proc->tdesc != NULL && low_supports_breakpoints ())
4119 {
4120 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4121
4122 lwp->stop_pc = low_get_pc (regcache);
4123
4124 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4125 (long) lwp->stop_pc);
4126 }
4127
4128 /* If we have pending signals, consume one if it can be delivered to
4129 the inferior. */
4130 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4131 {
4132 const pending_signal &p_sig = lwp->pending_signals.front ();
4133
4134 signal = p_sig.signal;
4135 if (p_sig.info.si_signo != 0)
4136 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4137 &p_sig.info);
4138
4139 lwp->pending_signals.pop_front ();
4140 }
4141
4142 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4143 lwpid_of (thread), step ? "step" : "continue", signal,
4144 lwp->stop_expected ? "expected" : "not expected");
4145
4146 low_prepare_to_resume (lwp);
4147
4148 regcache_invalidate_thread (thread);
4149 errno = 0;
4150 lwp->stepping = step;
4151 if (step)
4152 ptrace_request = PTRACE_SINGLESTEP;
4153 else if (gdb_catching_syscalls_p (lwp))
4154 ptrace_request = PTRACE_SYSCALL;
4155 else
4156 ptrace_request = PTRACE_CONT;
4157 ptrace (ptrace_request,
4158 lwpid_of (thread),
4159 (PTRACE_TYPE_ARG3) 0,
4160 /* Coerce to a uintptr_t first to avoid potential gcc warning
4161 of coercing an 8 byte integer to a 4 byte pointer. */
4162 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4163
4164 if (errno)
4165 {
4166 int saved_errno = errno;
4167
4168 threads_debug_printf ("ptrace errno = %d (%s)",
4169 saved_errno, strerror (saved_errno));
4170
4171 errno = saved_errno;
4172 perror_with_name ("resuming thread");
4173 }
4174
4175 /* Successfully resumed. Clear state that no longer makes sense,
4176 and mark the LWP as running. Must not do this before resuming
4177 otherwise if that fails other code will be confused. E.g., we'd
4178 later try to stop the LWP and hang forever waiting for a stop
4179 status. Note that we must not throw after this is cleared,
4180 otherwise handle_zombie_lwp_error would get confused. */
4181 lwp->stopped = 0;
4182 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4183 }
4184
4185 void
4186 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4187 {
4188 /* Nop. */
4189 }
4190
4191 /* Called when we try to resume a stopped LWP and that errors out. If
4192 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4193 or about to become one), discard the error, clear any pending status
4194 the LWP may have, and return true (we'll collect the exit status
4195 soon enough). Otherwise, return false. */
4196
4197 static int
4198 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4199 {
4200 struct thread_info *thread = get_lwp_thread (lp);
4201
4202 /* If we get an error after resuming the LWP successfully, we'd
4203 confuse !T state for the LWP being gone. */
4204 gdb_assert (lp->stopped);
4205
4206 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4207 because even if ptrace failed with ESRCH, the tracee may be "not
4208 yet fully dead", but already refusing ptrace requests. In that
4209 case the tracee has 'R (Running)' state for a little bit
4210 (observed in Linux 3.18). See also the note on ESRCH in the
4211 ptrace(2) man page. Instead, check whether the LWP has any state
4212 other than ptrace-stopped. */
4213
4214 /* Don't assume anything if /proc/PID/status can't be read. */
4215 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4216 {
4217 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4218 lp->status_pending_p = 0;
4219 return 1;
4220 }
4221 return 0;
4222 }
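
/* Illustration: the "State:" field of /proc/PID/status (see proc(5))
   is what distinguishes the cases above, roughly:

     t (tracing stop)   -> still ptrace-stopped; the error was real
     R, S, D, Z, ...    -> not ptrace-stopped; treat the LWP as gone

   hence the "trace_stopped" in the predicate's name.  */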
4223
4224 void
4225 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4226 siginfo_t *info)
4227 {
4228 try
4229 {
4230 resume_one_lwp_throw (lwp, step, signal, info);
4231 }
4232 catch (const gdb_exception_error &ex)
4233 {
4234 if (check_ptrace_stopped_lwp_gone (lwp))
4235 {
4236 /* This could be because we tried to resume an LWP after its leader
4237 exited. Mark it as resumed, so we can collect an exit event
4238 from it. */
4239 lwp->stopped = 0;
4240 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4241 }
4242 else
4243 throw;
4244 }
4245 }
4246
4247 /* This function is called once per thread via for_each_thread.
4248 We look up which resume request applies to THREAD and mark it with a
4249 pointer to the appropriate resume request.
4250
4251 This algorithm is O(threads * resume elements), but resume elements
4252 is small (and will remain small at least until GDB supports thread
4253 suspension). */
4254
4255 static void
4256 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4257 {
4258 struct lwp_info *lwp = get_thread_lwp (thread);
4259
4260 for (int ndx = 0; ndx < n; ndx++)
4261 {
4262 ptid_t ptid = resume[ndx].thread;
4263 if (ptid == minus_one_ptid
4264 || ptid == thread->id
4265 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4266 of PID'. */
4267 || (ptid.pid () == pid_of (thread)
4268 && (ptid.is_pid ()
4269 || ptid.lwp () == -1)))
4270 {
4271 if (resume[ndx].kind == resume_stop
4272 && thread->last_resume_kind == resume_stop)
4273 {
4274 threads_debug_printf
4275 ("already %s LWP %ld at GDB's request",
4276 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4277 ? "stopped" : "stopping"),
4278 lwpid_of (thread));
4279
4280 continue;
4281 }
4282
4283 /* Ignore (wildcard) resume requests for already-resumed
4284 threads. */
4285 if (resume[ndx].kind != resume_stop
4286 && thread->last_resume_kind != resume_stop)
4287 {
4288 threads_debug_printf
4289 ("already %s LWP %ld at GDB's request",
4290 (thread->last_resume_kind == resume_step
4291 ? "stepping" : "continuing"),
4292 lwpid_of (thread));
4293 continue;
4294 }
4295
4296 /* Don't let wildcard resumes resume fork/vfork/clone
4297 children that GDB does not yet know are new children. */
4298 if (lwp->relative != NULL)
4299 {
4300 struct lwp_info *rel = lwp->relative;
4301
4302 if (rel->status_pending_p
4303 && is_new_child_status (rel->waitstatus.kind ()))
4304 {
4305 threads_debug_printf
4306 ("not resuming LWP %ld: has queued stop reply",
4307 lwpid_of (thread));
4308 continue;
4309 }
4310 }
4311
4312 /* If the thread has a pending event that has already been
4313 reported to GDBserver core, but GDB has not pulled the
4314 event out of the vStopped queue yet, likewise, ignore the
4315 (wildcard) resume request. */
4316 if (in_queued_stop_replies (thread->id))
4317 {
4318 threads_debug_printf
4319 ("not resuming LWP %ld: has queued stop reply",
4320 lwpid_of (thread));
4321 continue;
4322 }
4323
4324 lwp->resume = &resume[ndx];
4325 thread->last_resume_kind = lwp->resume->kind;
4326
4327 lwp->step_range_start = lwp->resume->step_range_start;
4328 lwp->step_range_end = lwp->resume->step_range_end;
4329
4330 /* If we had a deferred signal to report, dequeue one now.
4331 This can happen if LWP gets more than one signal while
4332 trying to get out of a jump pad. */
4333 if (lwp->stopped
4334 && !lwp->status_pending_p
4335 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4336 {
4337 lwp->status_pending_p = 1;
4338
4339 threads_debug_printf
4340 ("Dequeueing deferred signal %d for LWP %ld, "
4341 "leaving status pending.",
4342 WSTOPSIG (lwp->status_pending),
4343 lwpid_of (thread));
4344 }
4345
4346 return;
4347 }
4348 }
4349
4350 /* No resume action for this thread. */
4351 lwp->resume = NULL;
4352 }
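
/* Example of the mapping (ptids illustrative): the RSP packet
   "vCont;s:p1.2;c" would arrive here roughly as

     resume[0] = { ptid (1, 2), resume_step }
     resume[1] = { minus_one_ptid, resume_continue }

   Thread p1.2 matches entry 0 and steps; every other thread falls
   through to the wildcard entry 1 and continues.  Order matters:
   the loop above applies the first matching entry and returns.  */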
4353
4354 bool
4355 linux_process_target::resume_status_pending (thread_info *thread)
4356 {
4357 struct lwp_info *lwp = get_thread_lwp (thread);
4358
4359 /* LWPs which will not be resumed are not interesting, because
4360 we might not wait for them next time through linux_wait. */
4361 if (lwp->resume == NULL)
4362 return false;
4363
4364 return thread_still_has_status_pending (thread);
4365 }
4366
4367 bool
4368 linux_process_target::thread_needs_step_over (thread_info *thread)
4369 {
4370 struct lwp_info *lwp = get_thread_lwp (thread);
4371 CORE_ADDR pc;
4372 struct process_info *proc = get_thread_process (thread);
4373
4374 /* GDBserver is skipping the extra traps from the wrapper program,
4375 so we don't have to do a step-over. */
4376 if (proc->tdesc == NULL)
4377 return false;
4378
4379 /* LWPs which will not be resumed are not interesting, because we
4380 might not wait for them next time through linux_wait. */
4381
4382 if (!lwp->stopped)
4383 {
4384 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4385 lwpid_of (thread));
4386 return false;
4387 }
4388
4389 if (thread->last_resume_kind == resume_stop)
4390 {
4391 threads_debug_printf
4392 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4393 lwpid_of (thread));
4394 return false;
4395 }
4396
4397 gdb_assert (lwp->suspended >= 0);
4398
4399 if (lwp->suspended)
4400 {
4401 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4402 lwpid_of (thread));
4403 return false;
4404 }
4405
4406 if (lwp->status_pending_p)
4407 {
4408 threads_debug_printf
4409 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4410 lwpid_of (thread));
4411 return false;
4412 }
4413
4414 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4415 or we have. */
4416 pc = get_pc (lwp);
4417
4418 /* If the PC has changed since we stopped, then don't do anything,
4419 and let the breakpoint/tracepoint be hit. This happens if, for
4420 instance, GDB handled the decr_pc_after_break subtraction itself,
4421 GDB is OOL stepping this thread, or the user has issued a "jump"
4422 command, or poked thread's registers herself. */
4423 if (pc != lwp->stop_pc)
4424 {
4425 threads_debug_printf
4426 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4427 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4428 paddress (lwp->stop_pc), paddress (pc));
4429 return false;
4430 }
4431
4432 /* On a software single-step target, resume the inferior with the
4433 signal rather than stepping over. */
4434 if (supports_software_single_step ()
4435 && !lwp->pending_signals.empty ()
4436 && lwp_signal_can_be_delivered (lwp))
4437 {
4438 threads_debug_printf
4439 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4440 lwpid_of (thread));
4441
4442 return false;
4443 }
4444
4445 scoped_restore_current_thread restore_thread;
4446 switch_to_thread (thread);
4447
4448 /* We can only step over breakpoints we know about. */
4449 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4450 {
4451 /* Don't step over a breakpoint that GDB expects to hit,
4452 though. If the condition is being evaluated on the target's side
4453 and it evaluates to false, step over this breakpoint as well. */
4454 if (gdb_breakpoint_here (pc)
4455 && gdb_condition_true_at_breakpoint (pc)
4456 && gdb_no_commands_at_breakpoint (pc))
4457 {
4458 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4459 " GDB breakpoint at 0x%s; skipping step over",
4460 lwpid_of (thread), paddress (pc));
4461
4462 return false;
4463 }
4464 else
4465 {
4466 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4467 "found breakpoint at 0x%s",
4468 lwpid_of (thread), paddress (pc));
4469
4470 /* We've found an lwp that needs stepping over --- return true so
4471 that find_thread stops looking. */
4472 return true;
4473 }
4474 }
4475
4476 threads_debug_printf
4477 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4478 lwpid_of (thread), paddress (pc));
4479
4480 return false;
4481 }
4482
4483 void
4484 linux_process_target::start_step_over (lwp_info *lwp)
4485 {
4486 struct thread_info *thread = get_lwp_thread (lwp);
4487 CORE_ADDR pc;
4488
4489 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4490 lwpid_of (thread));
4491
4492 stop_all_lwps (1, lwp);
4493
4494 if (lwp->suspended != 0)
4495 {
4496 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
4497 lwp->suspended);
4498 }
4499
4500 threads_debug_printf ("Done stopping all threads for step-over.");
4501
4502 /* Note, we should always reach here with an already adjusted PC,
4503 either by GDB (if we're resuming due to GDB's request), or by our
4504 caller, if we just finished handling an internal breakpoint GDB
4505 shouldn't care about. */
4506 pc = get_pc (lwp);
4507
4508 bool step = false;
4509 {
4510 scoped_restore_current_thread restore_thread;
4511 switch_to_thread (thread);
4512
4513 lwp->bp_reinsert = pc;
4514 uninsert_breakpoints_at (pc);
4515 uninsert_fast_tracepoint_jumps_at (pc);
4516
4517 step = single_step (lwp);
4518 }
4519
4520 resume_one_lwp (lwp, step, 0, NULL);
4521
4522 /* Require next event from this LWP. */
4523 step_over_bkpt = thread->id;
4524 }
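
/* The step-over dance in short: stop all LWPs, remember the
   breakpoint address in bp_reinsert, uninsert the breakpoint, step
   only this LWP past it (hardware step, or single-step breakpoints),
   and once its SIGTRAP arrives, finish_step_over below reinserts the
   breakpoint and unstop_all_lwps lets the other threads go again.  */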
4525
4526 bool
4527 linux_process_target::finish_step_over (lwp_info *lwp)
4528 {
4529 if (lwp->bp_reinsert != 0)
4530 {
4531 scoped_restore_current_thread restore_thread;
4532
4533 threads_debug_printf ("Finished step over.");
4534
4535 switch_to_thread (get_lwp_thread (lwp));
4536
4537 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4538 may be no breakpoint to reinsert there by now. */
4539 reinsert_breakpoints_at (lwp->bp_reinsert);
4540 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4541
4542 lwp->bp_reinsert = 0;
4543
4544 /* Delete any single-step breakpoints. No longer needed. We
4545 don't have to worry about other threads hitting this trap,
4546 and later not being able to explain it, because we were
4547 stepping over a breakpoint, and we hold all threads but
4548 LWP stopped while doing that. */
4549 if (!supports_hardware_single_step ())
4550 {
4551 gdb_assert (has_single_step_breakpoints (current_thread));
4552 delete_single_step_breakpoints (current_thread);
4553 }
4554
4555 step_over_bkpt = null_ptid;
4556 return true;
4557 }
4558 else
4559 return false;
4560 }
4561
4562 void
4563 linux_process_target::complete_ongoing_step_over ()
4564 {
4565 if (step_over_bkpt != null_ptid)
4566 {
4567 struct lwp_info *lwp;
4568 int wstat;
4569 int ret;
4570
4571 threads_debug_printf ("detach: step over in progress, finish it first");
4572
4573 /* Passing NULL_PTID as filter indicates we want all events to
4574 be left pending. Eventually this returns when there are no
4575 unwaited-for children left. */
4576 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4577 __WALL);
4578 gdb_assert (ret == -1);
4579
4580 lwp = find_lwp_pid (step_over_bkpt);
4581 if (lwp != NULL)
4582 {
4583 finish_step_over (lwp);
4584
4585 /* If we got our step SIGTRAP, don't leave it pending,
4586 otherwise we would report it to GDB as a spurious
4587 SIGTRAP. */
4588 gdb_assert (lwp->status_pending_p);
4589 if (WIFSTOPPED (lwp->status_pending)
4590 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4591 {
4592 thread_info *thread = get_lwp_thread (lwp);
4593 if (thread->last_resume_kind != resume_step)
4594 {
4595 threads_debug_printf ("detach: discard step-over SIGTRAP");
4596
4597 lwp->status_pending_p = 0;
4598 lwp->status_pending = 0;
4599 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4600 }
4601 else
4602 threads_debug_printf
4603 ("detach: resume_step, not discarding step-over SIGTRAP");
4604 }
4605 }
4606 step_over_bkpt = null_ptid;
4607 unsuspend_all_lwps (lwp);
4608 }
4609 }
4610
4611 void
4612 linux_process_target::resume_one_thread (thread_info *thread,
4613 bool leave_all_stopped)
4614 {
4615 struct lwp_info *lwp = get_thread_lwp (thread);
4616 int leave_pending;
4617
4618 if (lwp->resume == NULL)
4619 return;
4620
4621 if (lwp->resume->kind == resume_stop)
4622 {
4623 threads_debug_printf ("resume_stop request for LWP %ld",
4624 lwpid_of (thread));
4625
4626 if (!lwp->stopped)
4627 {
4628 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4629
4630 /* Stop the thread, and wait for the event asynchronously,
4631 through the event loop. */
4632 send_sigstop (lwp);
4633 }
4634 else
4635 {
4636 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4637
4638 /* The LWP may have been stopped in an internal event that
4639 was not meant to be notified back to GDB (e.g., gdbserver
4640 breakpoint), so we should be reporting a stop event in
4641 this case too. */
4642
4643 /* If the thread already has a pending SIGSTOP, this is a
4644 no-op. Otherwise, something later will presumably resume
4645 the thread and this will cause it to cancel any pending
4646 operation, due to last_resume_kind == resume_stop. If
4647 the thread already has a pending status to report, we
4648 will still report it the next time we wait - see
4649 status_pending_p_callback. */
4650
4651 /* If we already have a pending signal to report, then
4652 there's no need to queue a SIGSTOP, as this means we're
4653 midway through moving the LWP out of the jumppad, and we
4654 will report the pending signal as soon as that is
4655 finished. */
4656 if (lwp->pending_signals_to_report.empty ())
4657 send_sigstop (lwp);
4658 }
4659
4660 /* For stop requests, we're done. */
4661 lwp->resume = NULL;
4662 thread->last_status.set_ignore ();
4663 return;
4664 }
4665
4666 /* If this thread which is about to be resumed has a pending status,
4667 then don't resume it - we can just report the pending status.
4668 Likewise if it is suspended, because e.g., another thread is
4669 stepping past a breakpoint. Make sure to queue any signals that
4670 would otherwise be sent. In all-stop mode, we make this decision
4671 based on whether *any* thread has a pending status. If there's a
4672 thread that needs the step-over-breakpoint dance, then don't
4673 resume any other thread but that particular one. */
4674 leave_pending = (lwp->suspended
4675 || lwp->status_pending_p
4676 || leave_all_stopped);
4677
4678 /* If we have a new signal, enqueue the signal. */
4679 if (lwp->resume->sig != 0)
4680 {
4681 siginfo_t info, *info_p;
4682
4683 /* If this is the same signal we were previously stopped by,
4684 make sure to queue its siginfo. */
4685 if (WIFSTOPPED (lwp->last_status)
4686 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4687 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4688 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4689 info_p = &info;
4690 else
4691 info_p = NULL;
4692
4693 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4694 }
4695
4696 if (!leave_pending)
4697 {
4698 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4699
4700 proceed_one_lwp (thread, NULL);
4701 }
4702 else
4703 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4704
4705 thread->last_status.set_ignore ();
4706 lwp->resume = NULL;
4707 }
4708
4709 void
4710 linux_process_target::resume (thread_resume *resume_info, size_t n)
4711 {
4712 struct thread_info *need_step_over = NULL;
4713
4714 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4715
4716 for_each_thread ([&] (thread_info *thread)
4717 {
4718 linux_set_resume_request (thread, resume_info, n);
4719 });
4720
4721 /* If there is a thread which would otherwise be resumed, which has
4722 a pending status, then don't resume any threads - we can just
4723 report the pending status. Make sure to queue any signals that
4724 would otherwise be sent. In non-stop mode, we'll apply this
4725 logic to each thread individually. We consume all pending events
4726 before considering whether to start a step-over (in all-stop). */
4727 bool any_pending = false;
4728 if (!non_stop)
4729 any_pending = find_thread ([this] (thread_info *thread)
4730 {
4731 return resume_status_pending (thread);
4732 }) != nullptr;
4733
4734 /* If there is a thread which would otherwise be resumed, which is
4735 stopped at a breakpoint that needs stepping over, then don't
4736 resume any threads - have it step over the breakpoint with all
4737 other threads stopped, then resume all threads again. Make sure
4738 to queue any signals that would otherwise be delivered or
4739 queued. */
4740 if (!any_pending && low_supports_breakpoints ())
4741 need_step_over = find_thread ([this] (thread_info *thread)
4742 {
4743 return thread_needs_step_over (thread);
4744 });
4745
4746 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4747
4748 if (need_step_over != NULL)
4749 threads_debug_printf ("Not resuming all, need step over");
4750 else if (any_pending)
4751 threads_debug_printf ("Not resuming, all-stop and found "
4752 "an LWP with pending status");
4753 else
4754 threads_debug_printf ("Resuming, no pending status or step over needed");
4755
4756 /* Even if we're leaving threads stopped, queue all signals we'd
4757 otherwise deliver. */
4758 for_each_thread ([&] (thread_info *thread)
4759 {
4760 resume_one_thread (thread, leave_all_stopped);
4761 });
4762
4763 if (need_step_over)
4764 start_step_over (get_thread_lwp (need_step_over));
4765
4766 /* We may have events that were pending that can/should be sent to
4767 the client now. Trigger a linux_wait call. */
4768 if (target_is_async_p ())
4769 async_file_mark ();
4770 }
4771
4772 void
4773 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4774 {
4775 struct lwp_info *lwp = get_thread_lwp (thread);
4776 int step;
4777
4778 if (lwp == except)
4779 return;
4780
4781 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4782
4783 if (!lwp->stopped)
4784 {
4785 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4786 return;
4787 }
4788
4789 if (thread->last_resume_kind == resume_stop
4790 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4791 {
4792 threads_debug_printf (" client wants LWP to remain %ld stopped",
4793 lwpid_of (thread));
4794 return;
4795 }
4796
4797 if (lwp->status_pending_p)
4798 {
4799 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4800 lwpid_of (thread));
4801 return;
4802 }
4803
4804 gdb_assert (lwp->suspended >= 0);
4805
4806 if (lwp->suspended)
4807 {
4808 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4809 return;
4810 }
4811
4812 if (thread->last_resume_kind == resume_stop
4813 && lwp->pending_signals_to_report.empty ()
4814 && (lwp->collecting_fast_tracepoint
4815 == fast_tpoint_collect_result::not_collecting))
4816 {
4817 /* We haven't reported this LWP as stopped yet (otherwise, the
4818 last_status.kind check above would catch it, and we wouldn't
4819 reach here). This LWP may have been momentarily paused by a
4820 stop_all_lwps call while handling, for example, another LWP's
4821 step-over. In that case, the pending expected SIGSTOP signal
4822 that was queued at vCont;t handling time will have already
4823 been consumed by wait_for_sigstop, and so we need to requeue
4824 another one here. Note that if the LWP already has a SIGSTOP
4825 pending, this is a no-op. */
4826
4827 threads_debug_printf
4828 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4829 lwpid_of (thread));
4830
4831 send_sigstop (lwp);
4832 }
4833
4834 if (thread->last_resume_kind == resume_step)
4835 {
4836 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4837 lwpid_of (thread));
4838
4839 /* If resume_step is requested by GDB, install single-step
4840 breakpoints when the thread is about to actually be resumed, if
4841 they aren't already installed. */
4842 if (supports_software_single_step ()
4843 && !has_single_step_breakpoints (thread))
4844 install_software_single_step_breakpoints (lwp);
4845
4846 step = maybe_hw_step (thread);
4847 }
4848 else if (lwp->bp_reinsert != 0)
4849 {
4850 threads_debug_printf (" stepping LWP %ld, reinsert set",
4851 lwpid_of (thread));
4852
4853 step = maybe_hw_step (thread);
4854 }
4855 else
4856 step = 0;
4857
4858 resume_one_lwp (lwp, step, 0, NULL);
4859 }
4860
4861 void
4862 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4863 lwp_info *except)
4864 {
4865 struct lwp_info *lwp = get_thread_lwp (thread);
4866
4867 if (lwp == except)
4868 return;
4869
4870 lwp_suspended_decr (lwp);
4871
4872 proceed_one_lwp (thread, except);
4873 }
4874
4875 void
4876 linux_process_target::proceed_all_lwps ()
4877 {
4878 struct thread_info *need_step_over;
4879
4880 /* If there is a thread which would otherwise be resumed, which is
4881 stopped at a breakpoint that needs stepping over, then don't
4882 resume any threads - have it step over the breakpoint with all
4883 other threads stopped, then resume all threads again. */
4884
4885 if (low_supports_breakpoints ())
4886 {
4887 need_step_over = find_thread ([this] (thread_info *thread)
4888 {
4889 return thread_needs_step_over (thread);
4890 });
4891
4892 if (need_step_over != NULL)
4893 {
4894 threads_debug_printf ("found thread %ld needing a step-over",
4895 lwpid_of (need_step_over));
4896
4897 start_step_over (get_thread_lwp (need_step_over));
4898 return;
4899 }
4900 }
4901
4902 threads_debug_printf ("Proceeding, no step-over needed");
4903
4904 for_each_thread ([this] (thread_info *thread)
4905 {
4906 proceed_one_lwp (thread, NULL);
4907 });
4908 }
4909
4910 void
4911 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4912 {
4913 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4914
4915 if (except)
4916 threads_debug_printf ("except=(LWP %ld)",
4917 lwpid_of (get_lwp_thread (except)));
4918 else
4919 threads_debug_printf ("except=nullptr");
4920
4921 if (unsuspend)
4922 for_each_thread ([&] (thread_info *thread)
4923 {
4924 unsuspend_and_proceed_one_lwp (thread, except);
4925 });
4926 else
4927 for_each_thread ([&] (thread_info *thread)
4928 {
4929 proceed_one_lwp (thread, except);
4930 });
4931 }
4932
4933
4934 #ifdef HAVE_LINUX_REGSETS
4935
4936 #define use_linux_regsets 1
4937
4938 /* Returns true if REGSET has been disabled. */
4939
4940 static int
4941 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4942 {
4943 return (info->disabled_regsets != NULL
4944 && info->disabled_regsets[regset - info->regsets]);
4945 }
4946
4947 /* Disable REGSET. */
4948
4949 static void
4950 disable_regset (struct regsets_info *info, struct regset_info *regset)
4951 {
4952 int dr_offset;
4953
4954 dr_offset = regset - info->regsets;
4955 if (info->disabled_regsets == NULL)
4956 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4957 info->disabled_regsets[dr_offset] = 1;
4958 }
4959
4960 static int
4961 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4962 struct regcache *regcache)
4963 {
4964 struct regset_info *regset;
4965 int saw_general_regs = 0;
4966 int pid;
4967 struct iovec iov;
4968
4969 pid = lwpid_of (current_thread);
4970 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4971 {
4972 void *buf, *data;
4973 int nt_type, res;
4974
4975 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4976 continue;
4977
4978 buf = xmalloc (regset->size);
4979
4980 nt_type = regset->nt_type;
4981 if (nt_type)
4982 {
4983 iov.iov_base = buf;
4984 iov.iov_len = regset->size;
4985 data = (void *) &iov;
4986 }
4987 else
4988 data = buf;
4989
4990 #ifndef __sparc__
4991 res = ptrace (regset->get_request, pid,
4992 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4993 #else
4994 res = ptrace (regset->get_request, pid, data, nt_type);
4995 #endif
4996 if (res < 0)
4997 {
4998 if (errno == EIO
4999 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5000 {
5001 /* If we get EIO on a regset, or an EINVAL and the regset is
5002 optional, do not try it again for this process mode. */
5003 disable_regset (regsets_info, regset);
5004 }
5005 else if (errno == ENODATA)
5006 {
5007 /* ENODATA may be returned if the regset is currently
5008 not "active". This can happen in normal operation,
5009 so suppress the warning in this case. */
5010 }
5011 else if (errno == ESRCH)
5012 {
5013 /* At this point, ESRCH should mean the process is
5014 already gone, in which case we simply ignore attempts
5015 to read its registers. */
5016 }
5017 else
5018 {
5019 char s[256];
5020 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5021 pid);
5022 perror (s);
5023 }
5024 }
5025 else
5026 {
5027 if (regset->type == GENERAL_REGS)
5028 saw_general_regs = 1;
5029 regset->store_function (regcache, buf);
5030 }
5031 free (buf);
5032 }
5033 if (saw_general_regs)
5034 return 0;
5035 else
5036 return 1;
5037 }
5038
5039 static int
5040 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5041 struct regcache *regcache)
5042 {
5043 struct regset_info *regset;
5044 int saw_general_regs = 0;
5045 int pid;
5046 struct iovec iov;
5047
5048 pid = lwpid_of (current_thread);
5049 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5050 {
5051 void *buf, *data;
5052 int nt_type, res;
5053
5054 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5055 || regset->fill_function == NULL)
5056 continue;
5057
5058 buf = xmalloc (regset->size);
5059
5060 /* First fill the buffer with the current register set contents,
5061 in case there are any items in the kernel's regset that are
5062 not in gdbserver's regcache. */
5063
5064 nt_type = regset->nt_type;
5065 if (nt_type)
5066 {
5067 iov.iov_base = buf;
5068 iov.iov_len = regset->size;
5069 data = (void *) &iov;
5070 }
5071 else
5072 data = buf;
5073
5074 #ifndef __sparc__
5075 res = ptrace (regset->get_request, pid,
5076 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5077 #else
5078 res = ptrace (regset->get_request, pid, data, nt_type);
5079 #endif
5080
5081 if (res == 0)
5082 {
5083 /* Then overlay our cached registers on that. */
5084 regset->fill_function (regcache, buf);
5085
5086 /* Only now do we write the register set. */
5087 #ifndef __sparc__
5088 res = ptrace (regset->set_request, pid,
5089 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5090 #else
5091 res = ptrace (regset->set_request, pid, data, nt_type);
5092 #endif
5093 }
5094
5095 if (res < 0)
5096 {
5097 if (errno == EIO
5098 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5099 {
5100 /* If we get EIO on a regset, or an EINVAL and the regset is
5101 optional, do not try it again for this process mode. */
5102 disable_regset (regsets_info, regset);
5103 }
5104 else if (errno == ESRCH)
5105 {
5106 /* At this point, ESRCH should mean the process is
5107 already gone, in which case we simply ignore attempts
5108 to change its registers. See also the related
5109 comment in resume_one_lwp. */
5110 free (buf);
5111 return 0;
5112 }
5113 else
5114 {
5115 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5116 }
5117 }
5118 else if (regset->type == GENERAL_REGS)
5119 saw_general_regs = 1;
5120 free (buf);
5121 }
5122 if (saw_general_regs)
5123 return 0;
5124 else
5125 return 1;
5126 }
5127
5128 #else /* !HAVE_LINUX_REGSETS */
5129
5130 #define use_linux_regsets 0
5131 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5132 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5133
5134 #endif
5135
5136 /* Return 1 if register REGNO is supported by one of the regset ptrace
5137 calls or 0 if it has to be transferred individually. */
5138
5139 static int
5140 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5141 {
5142 unsigned char mask = 1 << (regno % 8);
5143 size_t index = regno / 8;
5144
5145 return (use_linux_regsets
5146 && (regs_info->regset_bitmap == NULL
5147 || (regs_info->regset_bitmap[index] & mask) != 0));
5148 }
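
/* Example: regno 11 gives index 1 and mask 1 << 3, i.e. bit 3 of
   byte 1 of regset_bitmap decides whether register 11 travels via a
   regset or has to be transferred individually.  */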
5149
5150 #ifdef HAVE_LINUX_USRREGS
5151
5152 static int
5153 register_addr (const struct usrregs_info *usrregs, int regnum)
5154 {
5155 int addr;
5156
5157 if (regnum < 0 || regnum >= usrregs->num_regs)
5158 error ("Invalid register number %d.", regnum);
5159
5160 addr = usrregs->regmap[regnum];
5161
5162 return addr;
5163 }
5164
5165
5166 void
5167 linux_process_target::fetch_register (const usrregs_info *usrregs,
5168 regcache *regcache, int regno)
5169 {
5170 CORE_ADDR regaddr;
5171 int i, size;
5172 char *buf;
5173 int pid;
5174
5175 if (regno >= usrregs->num_regs)
5176 return;
5177 if (low_cannot_fetch_register (regno))
5178 return;
5179
5180 regaddr = register_addr (usrregs, regno);
5181 if (regaddr == -1)
5182 return;
5183
5184 size = ((register_size (regcache->tdesc, regno)
5185 + sizeof (PTRACE_XFER_TYPE) - 1)
5186 & -sizeof (PTRACE_XFER_TYPE));
5187 buf = (char *) alloca (size);
5188
5189 pid = lwpid_of (current_thread);
5190 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5191 {
5192 errno = 0;
5193 *(PTRACE_XFER_TYPE *) (buf + i) =
5194 ptrace (PTRACE_PEEKUSER, pid,
5195 /* Coerce to a uintptr_t first to avoid potential gcc warning
5196 of coercing an 8 byte integer to a 4 byte pointer. */
5197 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5198 regaddr += sizeof (PTRACE_XFER_TYPE);
5199 if (errno != 0)
5200 {
5201 /* Mark register REGNO unavailable. */
5202 supply_register (regcache, regno, NULL);
5203 return;
5204 }
5205 }
5206
5207 low_supply_ptrace_register (regcache, regno, buf);
5208 }
5209
5210 void
5211 linux_process_target::store_register (const usrregs_info *usrregs,
5212 regcache *regcache, int regno)
5213 {
5214 CORE_ADDR regaddr;
5215 int i, size;
5216 char *buf;
5217 int pid;
5218
5219 if (regno >= usrregs->num_regs)
5220 return;
5221 if (low_cannot_store_register (regno))
5222 return;
5223
5224 regaddr = register_addr (usrregs, regno);
5225 if (regaddr == -1)
5226 return;
5227
5228 size = ((register_size (regcache->tdesc, regno)
5229 + sizeof (PTRACE_XFER_TYPE) - 1)
5230 & -sizeof (PTRACE_XFER_TYPE));
5231 buf = (char *) alloca (size);
5232 memset (buf, 0, size);
5233
5234 low_collect_ptrace_register (regcache, regno, buf);
5235
5236 pid = lwpid_of (current_thread);
5237 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5238 {
5239 errno = 0;
5240 ptrace (PTRACE_POKEUSER, pid,
5241 /* Coerce to a uintptr_t first to avoid potential gcc warning
5242 about coercing an 8 byte integer to a 4 byte pointer. */
5243 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5244 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5245 if (errno != 0)
5246 {
5247 /* At this point, ESRCH should mean the process is
5248 already gone, in which case we simply ignore attempts
5249 to change its registers. See also the related
5250 comment in resume_one_lwp. */
5251 if (errno == ESRCH)
5252 return;
5253
5254
5255 if (!low_cannot_store_register (regno))
5256 error ("writing register %d: %s", regno, safe_strerror (errno));
5257 }
5258 regaddr += sizeof (PTRACE_XFER_TYPE);
5259 }
5260 }
5261 #endif /* HAVE_LINUX_USRREGS */
5262
5263 void
5264 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5265 int regno, char *buf)
5266 {
5267 collect_register (regcache, regno, buf);
5268 }
5269
5270 void
5271 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5272 int regno, const char *buf)
5273 {
5274 supply_register (regcache, regno, buf);
5275 }
5276
5277 void
5278 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5279 regcache *regcache,
5280 int regno, int all)
5281 {
5282 #ifdef HAVE_LINUX_USRREGS
5283 struct usrregs_info *usr = regs_info->usrregs;
5284
5285 if (regno == -1)
5286 {
5287 for (regno = 0; regno < usr->num_regs; regno++)
5288 if (all || !linux_register_in_regsets (regs_info, regno))
5289 fetch_register (usr, regcache, regno);
5290 }
5291 else
5292 fetch_register (usr, regcache, regno);
5293 #endif
5294 }
5295
5296 void
5297 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5298 regcache *regcache,
5299 int regno, int all)
5300 {
5301 #ifdef HAVE_LINUX_USRREGS
5302 struct usrregs_info *usr = regs_info->usrregs;
5303
5304 if (regno == -1)
5305 {
5306 for (regno = 0; regno < usr->num_regs; regno++)
5307 if (all || !linux_register_in_regsets (regs_info, regno))
5308 store_register (usr, regcache, regno);
5309 }
5310 else
5311 store_register (usr, regcache, regno);
5312 #endif
5313 }
5314
5315 void
5316 linux_process_target::fetch_registers (regcache *regcache, int regno)
5317 {
5318 int use_regsets;
5319 int all = 0;
5320 const regs_info *regs_info = get_regs_info ();
5321
5322 if (regno == -1)
5323 {
5324 if (regs_info->usrregs != NULL)
5325 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5326 low_fetch_register (regcache, regno);
5327
5328 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5329 if (regs_info->usrregs != NULL)
5330 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5331 }
5332 else
5333 {
5334 if (low_fetch_register (regcache, regno))
5335 return;
5336
5337 use_regsets = linux_register_in_regsets (regs_info, regno);
5338 if (use_regsets)
5339 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5340 regcache);
5341 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5342 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5343 }
5344 }
5345
5346 void
5347 linux_process_target::store_registers (regcache *regcache, int regno)
5348 {
5349 int use_regsets;
5350 int all = 0;
5351 const regs_info *regs_info = get_regs_info ();
5352
5353 if (regno == -1)
5354 {
5355 all = regsets_store_inferior_registers (regs_info->regsets_info,
5356 regcache);
5357 if (regs_info->usrregs != NULL)
5358 usr_store_inferior_registers (regs_info, regcache, regno, all);
5359 }
5360 else
5361 {
5362 use_regsets = linux_register_in_regsets (regs_info, regno);
5363 if (use_regsets)
5364 all = regsets_store_inferior_registers (regs_info->regsets_info,
5365 regcache);
5366 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5367 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5368 }
5369 }
5370
5371 bool
5372 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5373 {
5374 return false;
5375 }
5376
5377 /* A wrapper for the read_memory target op. */
5378
5379 static int
5380 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5381 {
5382 return the_target->read_memory (memaddr, myaddr, len);
5383 }
5384
5385
5386 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5387 we can use a single read/write call, this can be much more
5388 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5389 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5390 One and only one of READBUF and WRITEBUF is non-null. If READBUF is
5391 not null, then we're reading, otherwise we're writing. */
5392
5393 static int
5394 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5395 const gdb_byte *writebuf, int len)
5396 {
5397 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5398
5399 process_info *proc = current_process ();
5400
5401 int fd = proc->priv->mem_fd;
5402 if (fd == -1)
5403 return EIO;
5404
5405 while (len > 0)
5406 {
5407 int bytes;
5408
5409 /* Use pread64/pwrite64 if available, since they save a syscall
5410 and can handle 64-bit offsets even on 32-bit platforms (for
5411 instance, SPARC debugging a SPARC64 application). But only
5412 use them if the offset isn't so high that when cast to off_t
5413 it'd be negative, as seen on SPARC64. pread64/pwrite64
5414 outright reject such offsets. lseek does not. */
5415 #ifdef HAVE_PREAD64
5416 if ((off_t) memaddr >= 0)
5417 bytes = (readbuf != nullptr
5418 ? pread64 (fd, readbuf, len, memaddr)
5419 : pwrite64 (fd, writebuf, len, memaddr));
5420 else
5421 #endif
5422 {
5423 bytes = -1;
5424 if (lseek (fd, memaddr, SEEK_SET) != -1)
5425 bytes = (readbuf != nullptr
5426 ? read (fd, readbuf, len)
5427 : write (fd, writebuf, len));
5428 }
5429
5430 if (bytes < 0)
5431 return errno;
5432 else if (bytes == 0)
5433 {
5434 /* EOF means the address space is gone, the whole process
5435 exited or execed. */
5436 return EIO;
5437 }
5438
5439 memaddr += bytes;
5440 if (readbuf != nullptr)
5441 readbuf += bytes;
5442 else
5443 writebuf += bytes;
5444 len -= bytes;
5445 }
5446
5447 return 0;
5448 }
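
/* For contrast, a PTRACE_PEEKTEXT transfer moves one word per
   syscall (sketch only; error handling omitted, and note a returned
   -1 needs an errno check to disambiguate it from valid data):

     for (int i = 0; i < len; i += sizeof (long))
       {
         long word = ptrace (PTRACE_PEEKTEXT, pid,
                             (PTRACE_TYPE_ARG3) (memaddr + i),
                             (PTRACE_TYPE_ARG4) 0);
         memcpy (readbuf + i, &word, sizeof (long));
       }

   which is why the single pread64/pwrite64 on /proc/PID/mem above is
   preferred whenever mem_fd is available.  */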
5449
5450 int
5451 linux_process_target::read_memory (CORE_ADDR memaddr,
5452 unsigned char *myaddr, int len)
5453 {
5454 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5455 }
5456
5457 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5458 memory at MEMADDR. On failure (cannot write to the inferior)
5459 returns the value of errno. Always succeeds if LEN is zero. */
5460
5461 int
5462 linux_process_target::write_memory (CORE_ADDR memaddr,
5463 const unsigned char *myaddr, int len)
5464 {
5465 if (debug_threads)
5466 {
5467 /* Dump up to four bytes. */
5468 char str[4 * 2 + 1];
5469 char *p = str;
5470 int dump = len < 4 ? len : 4;
5471
5472 for (int i = 0; i < dump; i++)
5473 {
5474 sprintf (p, "%02x", myaddr[i]);
5475 p += 2;
5476 }
5477 *p = '\0';
5478
5479 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5480 str, (long) memaddr, current_process ()->pid);
5481 }
5482
5483 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5484 }
5485
5486 void
5487 linux_process_target::look_up_symbols ()
5488 {
5489 #ifdef USE_THREAD_DB
5490 struct process_info *proc = current_process ();
5491
5492 if (proc->priv->thread_db != NULL)
5493 return;
5494
5495 thread_db_init ();
5496 #endif
5497 }
5498
5499 void
5500 linux_process_target::request_interrupt ()
5501 {
5502 /* Send a SIGINT to the process group. This acts just like the user
5503 typed a ^C on the controlling terminal. */
5504 int res = ::kill (-signal_pid, SIGINT);
5505 if (res == -1)
5506 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5507 signal_pid, safe_strerror (errno));
5508 }
5509
5510 bool
5511 linux_process_target::supports_read_auxv ()
5512 {
5513 return true;
5514 }
5515
5516 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5517 to debugger memory starting at MYADDR. */
5518
5519 int
5520 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5521 unsigned char *myaddr, unsigned int len)
5522 {
5523 char filename[PATH_MAX];
5524 int fd, n;
5525
5526 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5527
5528 fd = open (filename, O_RDONLY);
5529 if (fd < 0)
5530 return -1;
5531
5532 if (offset != (CORE_ADDR) 0
5533 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5534 n = -1;
5535 else
5536 n = read (fd, myaddr, len);
5537
5538 close (fd);
5539
5540 return n;
5541 }
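
/* The bytes read are raw ElfNN_auxv_t { a_type, a_un.a_val } pairs,
   e.g. on a 64-bit target (values illustrative):

     { AT_PAGESZ, 4096 }
     { AT_HWCAP,  0x1ff }
     ...
     { AT_NULL,   0 }         <- terminates the vector

   This function only shuttles the bytes; interpreting them is left
   to the caller.  */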
5542
5543 int
5544 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5545 int size, raw_breakpoint *bp)
5546 {
5547 if (type == raw_bkpt_type_sw)
5548 return insert_memory_breakpoint (bp);
5549 else
5550 return low_insert_point (type, addr, size, bp);
5551 }
5552
5553 int
5554 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5555 int size, raw_breakpoint *bp)
5556 {
5557 /* Unsupported (see target.h). */
5558 return 1;
5559 }
5560
5561 int
5562 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5563 int size, raw_breakpoint *bp)
5564 {
5565 if (type == raw_bkpt_type_sw)
5566 return remove_memory_breakpoint (bp);
5567 else
5568 return low_remove_point (type, addr, size, bp);
5569 }
5570
5571 int
5572 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5573 int size, raw_breakpoint *bp)
5574 {
5575 /* Unsupported (see target.h). */
5576 return 1;
5577 }
5578
5579 /* Implement the stopped_by_sw_breakpoint target_ops
5580 method. */
5581
5582 bool
5583 linux_process_target::stopped_by_sw_breakpoint ()
5584 {
5585 struct lwp_info *lwp = get_thread_lwp (current_thread);
5586
5587 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5588 }
5589
5590 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5591 method. */
5592
5593 bool
5594 linux_process_target::supports_stopped_by_sw_breakpoint ()
5595 {
5596 return USE_SIGTRAP_SIGINFO;
5597 }
5598
5599 /* Implement the stopped_by_hw_breakpoint target_ops
5600 method. */
5601
5602 bool
5603 linux_process_target::stopped_by_hw_breakpoint ()
5604 {
5605 struct lwp_info *lwp = get_thread_lwp (current_thread);
5606
5607 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5608 }
5609
5610 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5611 method. */
5612
5613 bool
5614 linux_process_target::supports_stopped_by_hw_breakpoint ()
5615 {
5616 return USE_SIGTRAP_SIGINFO;
5617 }
5618
5619 /* Implement the supports_hardware_single_step target_ops method. */
5620
5621 bool
5622 linux_process_target::supports_hardware_single_step ()
5623 {
5624 return true;
5625 }
5626
5627 bool
5628 linux_process_target::stopped_by_watchpoint ()
5629 {
5630 struct lwp_info *lwp = get_thread_lwp (current_thread);
5631
5632 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5633 }
5634
5635 CORE_ADDR
5636 linux_process_target::stopped_data_address ()
5637 {
5638 struct lwp_info *lwp = get_thread_lwp (current_thread);
5639
5640 return lwp->stopped_data_address;
5641 }
5642
/* This is only used for targets that define PT_TEXT_ADDR,
PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
target presumably has other ways of acquiring this information,
such as loadmaps.  */
5647
5648 bool
5649 linux_process_target::supports_read_offsets ()
5650 {
5651 #ifdef SUPPORTS_READ_OFFSETS
5652 return true;
5653 #else
5654 return false;
5655 #endif
5656 }
5657
5658 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5659 to tell gdb about. */
5660
5661 int
5662 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5663 {
5664 #ifdef SUPPORTS_READ_OFFSETS
5665 unsigned long text, text_end, data;
5666 int pid = lwpid_of (current_thread);
5667
5668 errno = 0;
5669
5670 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5671 (PTRACE_TYPE_ARG4) 0);
5672 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5673 (PTRACE_TYPE_ARG4) 0);
5674 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5675 (PTRACE_TYPE_ARG4) 0);
5676
5677 if (errno == 0)
5678 {
5679 /* Both text and data offsets produced at compile-time (and so
5680 used by gdb) are relative to the beginning of the program,
5681 with the data segment immediately following the text segment.
5682 However, the actual runtime layout in memory may put the data
5683 somewhere else, so when we send gdb a data base-address, we
5684 use the real data base address and subtract the compile-time
5685 data base-address from it (which is just the length of the
5686 text segment). BSS immediately follows data in both
5687 cases. */
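/* As a hypothetical example: with TEXT = 0x8000, TEXT_END =
0xc000 and DATA = 0x10000, the text segment is 0x4000 bytes
long, so the data base-address reported to GDB would be
0x10000 - 0x4000 = 0xc000.  */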
5688 *text_p = text;
5689 *data_p = data - (text_end - text);
5690
5691 return 1;
5692 }
5693 return 0;
5694 #else
5695 gdb_assert_not_reached ("target op read_offsets not supported");
5696 #endif
5697 }
5698
5699 bool
5700 linux_process_target::supports_get_tls_address ()
5701 {
5702 #ifdef USE_THREAD_DB
5703 return true;
5704 #else
5705 return false;
5706 #endif
5707 }
5708
5709 int
5710 linux_process_target::get_tls_address (thread_info *thread,
5711 CORE_ADDR offset,
5712 CORE_ADDR load_module,
5713 CORE_ADDR *address)
5714 {
5715 #ifdef USE_THREAD_DB
5716 return thread_db_get_tls_address (thread, offset, load_module, address);
5717 #else
5718 return -1;
5719 #endif
5720 }
5721
5722 bool
5723 linux_process_target::supports_qxfer_osdata ()
5724 {
5725 return true;
5726 }
5727
5728 int
5729 linux_process_target::qxfer_osdata (const char *annex,
5730 unsigned char *readbuf,
5731 unsigned const char *writebuf,
5732 CORE_ADDR offset, int len)
5733 {
5734 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5735 }
5736
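/* Convert SIGINFO between the layout used by ptrace (native) and the
layout expected for the inferior (INF_SIGINFO).  DIRECTION is 1 to
convert from INF_SIGINFO to SIGINFO, and 0 for the opposite
direction.  */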
5737 void
5738 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5739 gdb_byte *inf_siginfo, int direction)
5740 {
5741 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5742
5743 /* If there was no callback, or the callback didn't do anything,
5744 then just do a straight memcpy. */
5745 if (!done)
5746 {
5747 if (direction == 1)
5748 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5749 else
5750 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5751 }
5752 }
5753
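/* Default implementation of the low_siginfo_fixup method.  Returns
false to indicate that no arch-specific conversion was performed,
so siginfo_fixup falls back to a straight memcpy.  */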
5754 bool
5755 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5756 int direction)
5757 {
5758 return false;
5759 }
5760
5761 bool
5762 linux_process_target::supports_qxfer_siginfo ()
5763 {
5764 return true;
5765 }
5766
5767 int
5768 linux_process_target::qxfer_siginfo (const char *annex,
5769 unsigned char *readbuf,
5770 unsigned const char *writebuf,
5771 CORE_ADDR offset, int len)
5772 {
5773 int pid;
5774 siginfo_t siginfo;
5775 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5776
5777 if (current_thread == NULL)
5778 return -1;
5779
5780 pid = lwpid_of (current_thread);
5781
5782 threads_debug_printf ("%s siginfo for lwp %d.",
5783 readbuf != NULL ? "Reading" : "Writing",
5784 pid);
5785
5786 if (offset >= sizeof (siginfo))
5787 return -1;
5788
5789 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5790 return -1;
5791
5792 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5793 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5794 inferior with a 64-bit GDBSERVER should look the same as debugging it
5795 with a 32-bit GDBSERVER, we need to convert it. */
5796 siginfo_fixup (&siginfo, inf_siginfo, 0);
5797
5798 if (offset + len > sizeof (siginfo))
5799 len = sizeof (siginfo) - offset;
5800
5801 if (readbuf != NULL)
5802 memcpy (readbuf, inf_siginfo + offset, len);
5803 else
5804 {
5805 memcpy (inf_siginfo + offset, writebuf, len);
5806
5807 /* Convert back to ptrace layout before flushing it out. */
5808 siginfo_fixup (&siginfo, inf_siginfo, 1);
5809
5810 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5811 return -1;
5812 }
5813
5814 return len;
5815 }
5816
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
it notifies the event loop that a child may have changed state; and
it is the handler for the sigsuspend in my_waitpid.  */
5820
5821 static void
5822 sigchld_handler (int signo)
5823 {
5824 int old_errno = errno;
5825
5826 if (debug_threads)
5827 {
5828 do
5829 {
5830 /* Use the async signal safe debug function. */
5831 if (debug_write ("sigchld_handler\n",
5832 sizeof ("sigchld_handler\n") - 1) < 0)
5833 break; /* just ignore */
5834 } while (0);
5835 }
5836
5837 if (target_is_async_p ())
5838 async_file_mark (); /* trigger a linux_wait */
5839
5840 errno = old_errno;
5841 }
5842
5843 bool
5844 linux_process_target::supports_non_stop ()
5845 {
5846 return true;
5847 }
5848
5849 bool
5850 linux_process_target::async (bool enable)
5851 {
5852 bool previous = target_is_async_p ();
5853
5854 threads_debug_printf ("async (%d), previous=%d",
5855 enable, previous);
5856
5857 if (previous != enable)
5858 {
5859 sigset_t mask;
5860 sigemptyset (&mask);
5861 sigaddset (&mask, SIGCHLD);
5862
5863 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5864
5865 if (enable)
5866 {
5867 if (!linux_event_pipe.open_pipe ())
5868 {
5869 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5870
5871 warning ("creating event pipe failed.");
5872 return previous;
5873 }
5874
5875 /* Register the event loop handler. */
5876 add_file_handler (linux_event_pipe.event_fd (),
5877 handle_target_event, NULL,
5878 "linux-low");
5879
5880 /* Always trigger a linux_wait. */
5881 async_file_mark ();
5882 }
5883 else
5884 {
5885 delete_file_handler (linux_event_pipe.event_fd ());
5886
5887 linux_event_pipe.close_pipe ();
5888 }
5889
5890 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5891 }
5892
5893 return previous;
5894 }
5895
5896 int
5897 linux_process_target::start_non_stop (bool nonstop)
5898 {
5899 /* Register or unregister from event-loop accordingly. */
5900 target_async (nonstop);
5901
5902 if (target_is_async_p () != (nonstop != false))
5903 return -1;
5904
5905 return 0;
5906 }
5907
5908 bool
5909 linux_process_target::supports_multi_process ()
5910 {
5911 return true;
5912 }
5913
5914 /* Check if fork events are supported. */
5915
5916 bool
5917 linux_process_target::supports_fork_events ()
5918 {
5919 return true;
5920 }
5921
5922 /* Check if vfork events are supported. */
5923
5924 bool
5925 linux_process_target::supports_vfork_events ()
5926 {
5927 return true;
5928 }
5929
5930 /* Return the set of supported thread options. */
5931
5932 gdb_thread_options
5933 linux_process_target::supported_thread_options ()
5934 {
5935 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
5936 }
5937
5938 /* Check if exec events are supported. */
5939
5940 bool
5941 linux_process_target::supports_exec_events ()
5942 {
5943 return true;
5944 }
5945
5946 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5947 ptrace flags for all inferiors. This is in case the new GDB connection
5948 doesn't support the same set of events that the previous one did. */
5949
5950 void
5951 linux_process_target::handle_new_gdb_connection ()
5952 {
5953 /* Request that all the lwps reset their ptrace options. */
5954 for_each_thread ([] (thread_info *thread)
5955 {
5956 struct lwp_info *lwp = get_thread_lwp (thread);
5957
5958 if (!lwp->stopped)
5959 {
5960 /* Stop the lwp so we can modify its ptrace options. */
5961 lwp->must_set_ptrace_flags = 1;
5962 linux_stop_lwp (lwp);
5963 }
5964 else
5965 {
5966 /* Already stopped; go ahead and set the ptrace options. */
5967 struct process_info *proc = find_process_pid (pid_of (thread));
5968 int options = linux_low_ptrace_options (proc->attached);
5969
5970 linux_enable_event_reporting (lwpid_of (thread), options);
5971 lwp->must_set_ptrace_flags = 0;
5972 }
5973 });
5974 }
5975
5976 int
5977 linux_process_target::handle_monitor_command (char *mon)
5978 {
5979 #ifdef USE_THREAD_DB
5980 return thread_db_handle_monitor_command (mon);
5981 #else
5982 return 0;
5983 #endif
5984 }
5985
5986 int
5987 linux_process_target::core_of_thread (ptid_t ptid)
5988 {
5989 return linux_common_core_of_thread (ptid);
5990 }
5991
5992 bool
5993 linux_process_target::supports_disable_randomization ()
5994 {
5995 return true;
5996 }
5997
5998 bool
5999 linux_process_target::supports_agent ()
6000 {
6001 return true;
6002 }
6003
6004 bool
6005 linux_process_target::supports_range_stepping ()
6006 {
6007 if (supports_software_single_step ())
6008 return true;
6009
6010 return low_supports_range_stepping ();
6011 }
6012
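/* Default implementation of the low_supports_range_stepping
method.  */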
6013 bool
6014 linux_process_target::low_supports_range_stepping ()
6015 {
6016 return false;
6017 }
6018
6019 bool
6020 linux_process_target::supports_pid_to_exec_file ()
6021 {
6022 return true;
6023 }
6024
6025 const char *
6026 linux_process_target::pid_to_exec_file (int pid)
6027 {
6028 return linux_proc_pid_to_exec_file (pid);
6029 }
6030
6031 bool
6032 linux_process_target::supports_multifs ()
6033 {
6034 return true;
6035 }
6036
6037 int
6038 linux_process_target::multifs_open (int pid, const char *filename,
6039 int flags, mode_t mode)
6040 {
6041 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6042 }
6043
6044 int
6045 linux_process_target::multifs_unlink (int pid, const char *filename)
6046 {
6047 return linux_mntns_unlink (pid, filename);
6048 }
6049
6050 ssize_t
6051 linux_process_target::multifs_readlink (int pid, const char *filename,
6052 char *buf, size_t bufsiz)
6053 {
6054 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6055 }
6056
6057 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6058 struct target_loadseg
6059 {
6060 /* Core address to which the segment is mapped. */
6061 Elf32_Addr addr;
6062 /* VMA recorded in the program header. */
6063 Elf32_Addr p_vaddr;
6064 /* Size of this segment in memory. */
6065 Elf32_Word p_memsz;
6066 };
6067
6068 # if defined PT_GETDSBT
6069 struct target_loadmap
6070 {
6071 /* Protocol version number, must be zero. */
6072 Elf32_Word version;
6073 /* Pointer to the DSBT table, its size, and the DSBT index. */
6074 unsigned *dsbt_table;
6075 unsigned dsbt_size, dsbt_index;
6076 /* Number of segments in this map. */
6077 Elf32_Word nsegs;
6078 /* The actual memory map. */
6079 struct target_loadseg segs[/*nsegs*/];
6080 };
6081 # define LINUX_LOADMAP PT_GETDSBT
6082 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6083 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6084 # else
6085 struct target_loadmap
6086 {
6087 /* Protocol version number, must be zero. */
6088 Elf32_Half version;
6089 /* Number of segments in this map. */
6090 Elf32_Half nsegs;
6091 /* The actual memory map. */
6092 struct target_loadseg segs[/*nsegs*/];
6093 };
6094 # define LINUX_LOADMAP PTRACE_GETFDPIC
6095 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6096 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6097 # endif
6098
6099 bool
6100 linux_process_target::supports_read_loadmap ()
6101 {
6102 return true;
6103 }
6104
6105 int
6106 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6107 unsigned char *myaddr, unsigned int len)
6108 {
6109 int pid = lwpid_of (current_thread);
6110 int addr = -1;
6111 struct target_loadmap *data = NULL;
6112 unsigned int actual_length, copy_length;
6113
6114 if (strcmp (annex, "exec") == 0)
6115 addr = (int) LINUX_LOADMAP_EXEC;
6116 else if (strcmp (annex, "interp") == 0)
6117 addr = (int) LINUX_LOADMAP_INTERP;
6118 else
6119 return -1;
6120
6121 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6122 return -1;
6123
6124 if (data == NULL)
6125 return -1;
6126
6127 actual_length = sizeof (struct target_loadmap)
6128 + sizeof (struct target_loadseg) * data->nsegs;
6129
6130 if (offset < 0 || offset > actual_length)
6131 return -1;
6132
6133 copy_length = actual_length - offset < len ? actual_length - offset : len;
6134 memcpy (myaddr, (char *) data + offset, copy_length);
6135 return copy_length;
6136 }
6137 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6138
6139 bool
6140 linux_process_target::supports_catch_syscall ()
6141 {
6142 return low_supports_catch_syscall ();
6143 }
6144
6145 bool
6146 linux_process_target::low_supports_catch_syscall ()
6147 {
6148 return false;
6149 }
6150
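/* Implement the read_pc target_ops method.  Returns 0 when the low
target does not support breakpoints and thus provides no way to
fetch the PC.  */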
6151 CORE_ADDR
6152 linux_process_target::read_pc (regcache *regcache)
6153 {
6154 if (!low_supports_breakpoints ())
6155 return 0;
6156
6157 return low_get_pc (regcache);
6158 }
6159
6160 void
6161 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6162 {
6163 gdb_assert (low_supports_breakpoints ());
6164
6165 low_set_pc (regcache, pc);
6166 }
6167
6168 bool
6169 linux_process_target::supports_thread_stopped ()
6170 {
6171 return true;
6172 }
6173
6174 bool
6175 linux_process_target::thread_stopped (thread_info *thread)
6176 {
6177 return get_thread_lwp (thread)->stopped;
6178 }
6179
6180 /* This exposes stop-all-threads functionality to other modules. */
6181
6182 void
6183 linux_process_target::pause_all (bool freeze)
6184 {
6185 stop_all_lwps (freeze, NULL);
6186 }
6187
6188 /* This exposes unstop-all-threads functionality to other gdbserver
6189 modules. */
6190
6191 void
6192 linux_process_target::unpause_all (bool unfreeze)
6193 {
6194 unstop_all_lwps (unfreeze, NULL);
6195 }
6196
/* Extract the program header address (AT_PHDR) and count (AT_PHNUM)
from the inferior's auxv.  Return 0 on success, nonzero on failure.  */
6198
6199 static int
6200 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6201 CORE_ADDR *phdr_memaddr, int *num_phdr)
6202 {
6203 char filename[PATH_MAX];
6204 int fd;
6205 const int auxv_size = is_elf64
6206 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6207 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6208
6209 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6210
6211 fd = open (filename, O_RDONLY);
6212 if (fd < 0)
6213 return 1;
6214
6215 *phdr_memaddr = 0;
6216 *num_phdr = 0;
6217 while (read (fd, buf, auxv_size) == auxv_size
6218 && (*phdr_memaddr == 0 || *num_phdr == 0))
6219 {
6220 if (is_elf64)
6221 {
6222 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6223
6224 switch (aux->a_type)
6225 {
6226 case AT_PHDR:
6227 *phdr_memaddr = aux->a_un.a_val;
6228 break;
6229 case AT_PHNUM:
6230 *num_phdr = aux->a_un.a_val;
6231 break;
6232 }
6233 }
6234 else
6235 {
6236 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6237
6238 switch (aux->a_type)
6239 {
6240 case AT_PHDR:
6241 *phdr_memaddr = aux->a_un.a_val;
6242 break;
6243 case AT_PHNUM:
6244 *num_phdr = aux->a_un.a_val;
6245 break;
6246 }
6247 }
6248 }
6249
6250 close (fd);
6251
6252 if (*phdr_memaddr == 0 || *num_phdr == 0)
6253 {
6254 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6255 "phdr_memaddr = %ld, phdr_num = %d",
6256 (long) *phdr_memaddr, *num_phdr);
6257 return 2;
6258 }
6259
6260 return 0;
6261 }
6262
6263 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6264
6265 static CORE_ADDR
6266 get_dynamic (const int pid, const int is_elf64)
6267 {
6268 CORE_ADDR phdr_memaddr, relocation;
6269 int num_phdr, i;
6270 unsigned char *phdr_buf;
6271 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6272
6273 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6274 return 0;
6275
6276 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6277 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6278
6279 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6280 return 0;
6281
6282 /* Compute relocation: it is expected to be 0 for "regular" executables,
6283 non-zero for PIE ones. */
6284 relocation = -1;
6285 for (i = 0; relocation == -1 && i < num_phdr; i++)
6286 if (is_elf64)
6287 {
6288 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6289
6290 if (p->p_type == PT_PHDR)
6291 relocation = phdr_memaddr - p->p_vaddr;
6292 }
6293 else
6294 {
6295 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6296
6297 if (p->p_type == PT_PHDR)
6298 relocation = phdr_memaddr - p->p_vaddr;
6299 }
6300
6301 if (relocation == -1)
6302 {
/* PT_PHDR is optional, but necessary for PIE in general.
Fortunately, real-world executables, including PIE executables,
always have PT_PHDR present.  PT_PHDR is absent from some shared
libraries and from fpc (Free Pascal 2.4) binaries, but neither of
those needs or provides DT_DEBUG anyway (fpc binaries are
statically linked).

Therefore, if DT_DEBUG exists, PT_PHDR exists as well.

GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6312
6313 return 0;
6314 }
6315
6316 for (i = 0; i < num_phdr; i++)
6317 {
6318 if (is_elf64)
6319 {
6320 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6321
6322 if (p->p_type == PT_DYNAMIC)
6323 return p->p_vaddr + relocation;
6324 }
6325 else
6326 {
6327 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6328
6329 if (p->p_type == PT_DYNAMIC)
6330 return p->p_vaddr + relocation;
6331 }
6332 }
6333
6334 return 0;
6335 }
6336
6337 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6338 can be 0 if the inferior does not yet have the library list initialized.
6339 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6340 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6341
6342 static CORE_ADDR
6343 get_r_debug (const int pid, const int is_elf64)
6344 {
6345 CORE_ADDR dynamic_memaddr;
6346 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6347 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6348 CORE_ADDR map = -1;
6349
6350 dynamic_memaddr = get_dynamic (pid, is_elf64);
6351 if (dynamic_memaddr == 0)
6352 return map;
6353
6354 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6355 {
6356 if (is_elf64)
6357 {
6358 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6359 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6360 union
6361 {
6362 Elf64_Xword map;
6363 unsigned char buf[sizeof (Elf64_Xword)];
6364 }
6365 rld_map;
6366 #endif
6367 #ifdef DT_MIPS_RLD_MAP
6368 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6369 {
6370 if (linux_read_memory (dyn->d_un.d_val,
6371 rld_map.buf, sizeof (rld_map.buf)) == 0)
6372 return rld_map.map;
6373 else
6374 break;
6375 }
6376 #endif /* DT_MIPS_RLD_MAP */
6377 #ifdef DT_MIPS_RLD_MAP_REL
6378 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6379 {
6380 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6381 rld_map.buf, sizeof (rld_map.buf)) == 0)
6382 return rld_map.map;
6383 else
6384 break;
6385 }
6386 #endif /* DT_MIPS_RLD_MAP_REL */
6387
6388 if (dyn->d_tag == DT_DEBUG && map == -1)
6389 map = dyn->d_un.d_val;
6390
6391 if (dyn->d_tag == DT_NULL)
6392 break;
6393 }
6394 else
6395 {
6396 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6397 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6398 union
6399 {
6400 Elf32_Word map;
6401 unsigned char buf[sizeof (Elf32_Word)];
6402 }
6403 rld_map;
6404 #endif
6405 #ifdef DT_MIPS_RLD_MAP
6406 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6407 {
6408 if (linux_read_memory (dyn->d_un.d_val,
6409 rld_map.buf, sizeof (rld_map.buf)) == 0)
6410 return rld_map.map;
6411 else
6412 break;
6413 }
6414 #endif /* DT_MIPS_RLD_MAP */
6415 #ifdef DT_MIPS_RLD_MAP_REL
6416 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6417 {
6418 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6419 rld_map.buf, sizeof (rld_map.buf)) == 0)
6420 return rld_map.map;
6421 else
6422 break;
6423 }
6424 #endif /* DT_MIPS_RLD_MAP_REL */
6425
6426 if (dyn->d_tag == DT_DEBUG && map == -1)
6427 map = dyn->d_un.d_val;
6428
6429 if (dyn->d_tag == DT_NULL)
6430 break;
6431 }
6432
6433 dynamic_memaddr += dyn_size;
6434 }
6435
6436 return map;
6437 }
6438
6439 /* Read one pointer from MEMADDR in the inferior. */
6440
6441 static int
6442 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6443 {
6444 int ret;
6445
/* Go through a union so this works on either big or little endian
hosts, when the inferior's pointer size is smaller than the size
of CORE_ADDR.  It is assumed the inferior's endianness is the
same as the superior's.  */
6450 union
6451 {
6452 CORE_ADDR core_addr;
6453 unsigned int ui;
6454 unsigned char uc;
6455 } addr;
6456
6457 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6458 if (ret == 0)
6459 {
6460 if (ptr_size == sizeof (CORE_ADDR))
6461 *ptr = addr.core_addr;
6462 else if (ptr_size == sizeof (unsigned int))
6463 *ptr = addr.ui;
6464 else
6465 gdb_assert_not_reached ("unhandled pointer size");
6466 }
6467 return ret;
6468 }
6469
6470 bool
6471 linux_process_target::supports_qxfer_libraries_svr4 ()
6472 {
6473 return true;
6474 }
6475
6476 struct link_map_offsets
6477 {
6478 /* Offset and size of r_debug.r_version. */
6479 int r_version_offset;
6480
6481 /* Offset and size of r_debug.r_map. */
6482 int r_map_offset;
6483
6484 /* Offset of r_debug_extended.r_next. */
6485 int r_next_offset;
6486
6487 /* Offset to l_addr field in struct link_map. */
6488 int l_addr_offset;
6489
6490 /* Offset to l_name field in struct link_map. */
6491 int l_name_offset;
6492
6493 /* Offset to l_ld field in struct link_map. */
6494 int l_ld_offset;
6495
6496 /* Offset to l_next field in struct link_map. */
6497 int l_next_offset;
6498
6499 /* Offset to l_prev field in struct link_map. */
6500 int l_prev_offset;
6501 };
6502
6503 static const link_map_offsets lmo_32bit_offsets =
6504 {
6505 0, /* r_version offset. */
6506 4, /* r_debug.r_map offset. */
6507 20, /* r_debug_extended.r_next. */
6508 0, /* l_addr offset in link_map. */
6509 4, /* l_name offset in link_map. */
6510 8, /* l_ld offset in link_map. */
6511 12, /* l_next offset in link_map. */
6512 16 /* l_prev offset in link_map. */
6513 };
6514
6515 static const link_map_offsets lmo_64bit_offsets =
6516 {
6517 0, /* r_version offset. */
6518 8, /* r_debug.r_map offset. */
6519 40, /* r_debug_extended.r_next. */
6520 0, /* l_addr offset in link_map. */
6521 8, /* l_name offset in link_map. */
6522 16, /* l_ld offset in link_map. */
6523 24, /* l_next offset in link_map. */
6524 32 /* l_prev offset in link_map. */
6525 };
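/* For reference, a sketch of the glibc structures these offsets
describe, in their 64-bit layout (see glibc's <link.h> for the
authoritative definitions; the 32-bit offsets above follow the same
field order):

     struct r_debug
     {
       int r_version;            // offset 0
       struct link_map *r_map;   // offset 8 (after padding)
       ...
     };

     struct link_map
     {
       ElfW(Addr) l_addr;        // offset 0
       char *l_name;             // offset 8
       ElfW(Dyn) *l_ld;          // offset 16
       struct link_map *l_next;  // offset 24
       struct link_map *l_prev;  // offset 32
     };

The r_debug_extended variant embeds r_debug and appends an r_next
pointer (offset 40 here) used to chain link-map namespaces.  */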
6526
6527 /* Get the loaded shared libraries from one namespace. */
6528
6529 static void
6530 read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6531 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6532 {
6533 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6534
6535 while (lm_addr
6536 && read_one_ptr (lm_addr + lmo->l_name_offset,
6537 &l_name, ptr_size) == 0
6538 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6539 &l_addr, ptr_size) == 0
6540 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6541 &l_ld, ptr_size) == 0
6542 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6543 &l_prev, ptr_size) == 0
6544 && read_one_ptr (lm_addr + lmo->l_next_offset,
6545 &l_next, ptr_size) == 0)
6546 {
6547 unsigned char libname[PATH_MAX];
6548
6549 if (lm_prev != l_prev)
6550 {
6551 warning ("Corrupted shared library list: 0x%s != 0x%s",
6552 paddress (lm_prev), paddress (l_prev));
6553 break;
6554 }
6555
6556 /* Not checking for error because reading may stop before we've got
6557 PATH_MAX worth of characters. */
6558 libname[0] = '\0';
6559 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6560 libname[sizeof (libname) - 1] = '\0';
6561 if (libname[0] != '\0')
6562 {
6563 string_appendf (document, "<library name=\"");
6564 xml_escape_text_append (document, (char *) libname);
6565 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6566 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6567 paddress (lm_addr), paddress (l_addr),
6568 paddress (l_ld), paddress (lmid));
6569 }
6570
6571 lm_prev = lm_addr;
6572 lm_addr = l_next;
6573 }
6574 }
6575
6576 /* Construct qXfer:libraries-svr4:read reply. */
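/* An example reply, with purely illustrative addresses:

     <library-list-svr4 version="1.0" main-lm="0x555555554000">
       <library name="/lib/libc.so.6" lm="0x7ffff7ffe190"
                l_addr="0x7ffff7dd7000" l_ld="0x7ffff7fb3e80"
                lmid="0x7ffff7ffd9e8"/>
     </library-list-svr4>  */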
6577
6578 int
6579 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6580 unsigned char *readbuf,
6581 unsigned const char *writebuf,
6582 CORE_ADDR offset, int len)
6583 {
6584 struct process_info_private *const priv = current_process ()->priv;
6585 char filename[PATH_MAX];
6586 int pid, is_elf64;
6587 unsigned int machine;
6588 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6589
6590 if (writebuf != NULL)
6591 return -2;
6592 if (readbuf == NULL)
6593 return -1;
6594
6595 pid = lwpid_of (current_thread);
6596 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6597 is_elf64 = elf_64_file_p (filename, &machine);
6598 const link_map_offsets *lmo;
6599 int ptr_size;
6600 if (is_elf64)
6601 {
6602 lmo = &lmo_64bit_offsets;
6603 ptr_size = 8;
6604 }
6605 else
6606 {
6607 lmo = &lmo_32bit_offsets;
6608 ptr_size = 4;
6609 }
6610
6611 while (annex[0] != '\0')
6612 {
6613 const char *sep;
6614 CORE_ADDR *addrp;
6615 int name_len;
6616
6617 sep = strchr (annex, '=');
6618 if (sep == NULL)
6619 break;
6620
6621 name_len = sep - annex;
6622 if (name_len == 4 && startswith (annex, "lmid"))
6623 addrp = &lmid;
6624 else if (name_len == 5 && startswith (annex, "start"))
6625 addrp = &lm_addr;
6626 else if (name_len == 4 && startswith (annex, "prev"))
6627 addrp = &lm_prev;
6628 else
6629 {
6630 annex = strchr (sep, ';');
6631 if (annex == NULL)
6632 break;
6633 annex++;
6634 continue;
6635 }
6636
6637 annex = decode_address_to_semicolon (addrp, sep + 1);
6638 }
6639
6640 std::string document = "<library-list-svr4 version=\"1.0\"";
6641
6642 /* When the starting LM_ADDR is passed in the annex, only traverse that
6643 namespace, which is assumed to be identified by LMID.
6644
6645 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6646 if (lm_addr != 0)
6647 {
6648 document += ">";
6649 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6650 }
6651 else
6652 {
6653 if (lm_prev != 0)
6654 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6655
/* We could interpret LMID as 'provide only the libraries for
this namespace', but GDB currently sends either all three of
lmid, start, and prev, or none of them.  */
6659 if (lmid != 0)
6660 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6661
6662 CORE_ADDR r_debug = priv->r_debug;
6663 if (r_debug == 0)
6664 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6665
/* We failed to find DT_DEBUG.  This situation will not change
for this inferior, so do not retry.  Report it to GDB as E01;
see GDB's solib-svr4.c for the reasons.  */
6669 if (r_debug == (CORE_ADDR) -1)
6670 return -1;
6671
6672 /* Terminate the header if we end up with an empty list. */
6673 if (r_debug == 0)
6674 document += ">";
6675
6676 while (r_debug != 0)
6677 {
6678 int r_version = 0;
6679 if (linux_read_memory (r_debug + lmo->r_version_offset,
6680 (unsigned char *) &r_version,
6681 sizeof (r_version)) != 0)
6682 {
6683 warning ("unable to read r_version from 0x%s",
6684 paddress (r_debug + lmo->r_version_offset));
6685 break;
6686 }
6687
6688 if (r_version < 1)
6689 {
6690 warning ("unexpected r_debug version %d", r_version);
6691 break;
6692 }
6693
6694 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6695 ptr_size) != 0)
6696 {
6697 warning ("unable to read r_map from 0x%s",
6698 paddress (r_debug + lmo->r_map_offset));
6699 break;
6700 }
6701
6702 /* We read the entire namespace. */
6703 lm_prev = 0;
6704
/* The first entry corresponds to the main executable unless the
dynamic loader was loaded late by a static executable.  But in
that case the main executable would not have PT_DYNAMIC present,
and we would not have gotten here.  */
6709 if (r_debug == priv->r_debug)
6710 {
6711 if (lm_addr != 0)
6712 string_appendf (document, " main-lm=\"0x%s\">",
6713 paddress (lm_addr));
6714 else
6715 document += ">";
6716
6717 lm_prev = lm_addr;
6718 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6719 &lm_addr, ptr_size) != 0)
6720 {
6721 warning ("unable to read l_next from 0x%s",
6722 paddress (lm_addr + lmo->l_next_offset));
6723 break;
6724 }
6725 }
6726
6727 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6728
6729 if (r_version < 2)
6730 break;
6731
6732 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6733 ptr_size) != 0)
6734 {
6735 warning ("unable to read r_next from 0x%s",
6736 paddress (r_debug + lmo->r_next_offset));
6737 break;
6738 }
6739 }
6740 }
6741
6742 document += "</library-list-svr4>";
6743
6744 int document_len = document.length ();
6745 if (offset < document_len)
6746 document_len -= offset;
6747 else
6748 document_len = 0;
6749 if (len > document_len)
6750 len = document_len;
6751
6752 memcpy (readbuf, document.data () + offset, len);
6753
6754 return len;
6755 }
6756
6757 #ifdef HAVE_LINUX_BTRACE
6758
6759 bool
6760 linux_process_target::supports_btrace ()
6761 {
6762 return true;
6763 }
6764
6765 btrace_target_info *
6766 linux_process_target::enable_btrace (thread_info *tp,
6767 const btrace_config *conf)
6768 {
6769 return linux_enable_btrace (tp->id, conf);
6770 }
6771
6772 /* See to_disable_btrace target method. */
6773
6774 int
6775 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6776 {
6777 enum btrace_error err;
6778
6779 err = linux_disable_btrace (tinfo);
6780 return (err == BTRACE_ERR_NONE ? 0 : -1);
6781 }
6782
6783 /* Encode an Intel Processor Trace configuration. */
6784
6785 static void
6786 linux_low_encode_pt_config (std::string *buffer,
6787 const struct btrace_data_pt_config *config)
6788 {
6789 *buffer += "<pt-config>\n";
6790
6791 switch (config->cpu.vendor)
6792 {
6793 case CV_INTEL:
6794 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6795 "model=\"%u\" stepping=\"%u\"/>\n",
6796 config->cpu.family, config->cpu.model,
6797 config->cpu.stepping);
6798 break;
6799
6800 default:
6801 break;
6802 }
6803
6804 *buffer += "</pt-config>\n";
6805 }
6806
6807 /* Encode a raw buffer. */
6808
6809 static void
6810 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6811 unsigned int size)
6812 {
6813 if (size == 0)
6814 return;
6815
6816 /* We use hex encoding - see gdbsupport/rsp-low.h. */
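/* For example, the single byte 0xa5 is emitted as the two ASCII
characters "a5".  */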
6817 *buffer += "<raw>\n";
6818
6819 while (size-- > 0)
6820 {
6821 char elem[2];
6822
6823 elem[0] = tohex ((*data >> 4) & 0xf);
6824 elem[1] = tohex (*data++ & 0xf);
6825
6826 buffer->append (elem, 2);
6827 }
6828
6829 *buffer += "</raw>\n";
6830 }
6831
6832 /* See to_read_btrace target method. */
6833
6834 int
6835 linux_process_target::read_btrace (btrace_target_info *tinfo,
6836 std::string *buffer,
6837 enum btrace_read_type type)
6838 {
6839 struct btrace_data btrace;
6840 enum btrace_error err;
6841
6842 err = linux_read_btrace (&btrace, tinfo, type);
6843 if (err != BTRACE_ERR_NONE)
6844 {
6845 if (err == BTRACE_ERR_OVERFLOW)
6846 *buffer += "E.Overflow.";
6847 else
6848 *buffer += "E.Generic Error.";
6849
6850 return -1;
6851 }
6852
6853 switch (btrace.format)
6854 {
6855 case BTRACE_FORMAT_NONE:
6856 *buffer += "E.No Trace.";
6857 return -1;
6858
6859 case BTRACE_FORMAT_BTS:
6860 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6861 *buffer += "<btrace version=\"1.0\">\n";
6862
6863 for (const btrace_block &block : *btrace.variant.bts.blocks)
6864 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6865 paddress (block.begin), paddress (block.end));
6866
6867 *buffer += "</btrace>\n";
6868 break;
6869
6870 case BTRACE_FORMAT_PT:
6871 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6872 *buffer += "<btrace version=\"1.0\">\n";
6873 *buffer += "<pt>\n";
6874
6875 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6876
6877 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6878 btrace.variant.pt.size);
6879
6880 *buffer += "</pt>\n";
6881 *buffer += "</btrace>\n";
6882 break;
6883
6884 default:
6885 *buffer += "E.Unsupported Trace Format.";
6886 return -1;
6887 }
6888
6889 return 0;
6890 }
6891
6892 /* See to_btrace_conf target method. */
6893
6894 int
6895 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6896 std::string *buffer)
6897 {
6898 const struct btrace_config *conf;
6899
6900 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6901 *buffer += "<btrace-conf version=\"1.0\">\n";
6902
6903 conf = linux_btrace_conf (tinfo);
6904 if (conf != NULL)
6905 {
6906 switch (conf->format)
6907 {
6908 case BTRACE_FORMAT_NONE:
6909 break;
6910
6911 case BTRACE_FORMAT_BTS:
6912 string_xml_appendf (*buffer, "<bts");
6913 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6914 string_xml_appendf (*buffer, " />\n");
6915 break;
6916
6917 case BTRACE_FORMAT_PT:
6918 string_xml_appendf (*buffer, "<pt");
6919 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6920 string_xml_appendf (*buffer, "/>\n");
6921 break;
6922 }
6923 }
6924
6925 *buffer += "</btrace-conf>\n";
6926 return 0;
6927 }
6928 #endif /* HAVE_LINUX_BTRACE */
6929
6930 /* See nat/linux-nat.h. */
6931
6932 ptid_t
6933 current_lwp_ptid (void)
6934 {
6935 return ptid_of (current_thread);
6936 }
6937
6938 const char *
6939 linux_process_target::thread_name (ptid_t thread)
6940 {
6941 return linux_proc_tid_get_name (thread);
6942 }
6943
6944 #if USE_THREAD_DB
6945 bool
6946 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6947 int *handle_len)
6948 {
6949 return thread_db_thread_handle (ptid, handle, handle_len);
6950 }
6951 #endif
6952
6953 thread_info *
6954 linux_process_target::thread_pending_parent (thread_info *thread)
6955 {
6956 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6957
6958 if (parent == nullptr)
6959 return nullptr;
6960
6961 return get_lwp_thread (parent);
6962 }
6963
6964 thread_info *
6965 linux_process_target::thread_pending_child (thread_info *thread,
6966 target_waitkind *kind)
6967 {
6968 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
6969
6970 if (child == nullptr)
6971 return nullptr;
6972
6973 return get_lwp_thread (child);
6974 }
6975
6976 /* Default implementation of linux_target_ops method "set_pc" for
6977 32-bit pc register which is literally named "pc". */
6978
6979 void
6980 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6981 {
6982 uint32_t newpc = pc;
6983
6984 supply_register_by_name (regcache, "pc", &newpc);
6985 }
6986
6987 /* Default implementation of linux_target_ops method "get_pc" for
6988 32-bit pc register which is literally named "pc". */
6989
6990 CORE_ADDR
6991 linux_get_pc_32bit (struct regcache *regcache)
6992 {
6993 uint32_t pc;
6994
6995 collect_register_by_name (regcache, "pc", &pc);
6996 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6997 return pc;
6998 }
6999
7000 /* Default implementation of linux_target_ops method "set_pc" for
7001 64-bit pc register which is literally named "pc". */
7002
7003 void
7004 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7005 {
7006 uint64_t newpc = pc;
7007
7008 supply_register_by_name (regcache, "pc", &newpc);
7009 }
7010
7011 /* Default implementation of linux_target_ops method "get_pc" for
7012 64-bit pc register which is literally named "pc". */
7013
7014 CORE_ADDR
7015 linux_get_pc_64bit (struct regcache *regcache)
7016 {
7017 uint64_t pc;
7018
7019 collect_register_by_name (regcache, "pc", &pc);
7020 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7021 return pc;
7022 }
7023
7024 /* See linux-low.h. */
7025
7026 int
7027 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7028 {
7029 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7030 int offset = 0;
7031
7032 gdb_assert (wordsize == 4 || wordsize == 8);
7033
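/* Each auxv entry is a pair of WORDSIZE-byte values: a type and a
value, with the vector terminated by an AT_NULL entry.  Scan the
pairs until MATCH is found or the end of the vector is reached.  */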
7034 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7035 == 2 * wordsize)
7036 {
7037 if (wordsize == 4)
7038 {
7039 uint32_t *data_p = (uint32_t *) data;
7040 if (data_p[0] == match)
7041 {
7042 *valp = data_p[1];
7043 return 1;
7044 }
7045 }
7046 else
7047 {
7048 uint64_t *data_p = (uint64_t *) data;
7049 if (data_p[0] == match)
7050 {
7051 *valp = data_p[1];
7052 return 1;
7053 }
7054 }
7055
7056 offset += 2 * wordsize;
7057 }
7058
7059 return 0;
7060 }
7061
7062 /* See linux-low.h. */
7063
7064 CORE_ADDR
7065 linux_get_hwcap (int pid, int wordsize)
7066 {
7067 CORE_ADDR hwcap = 0;
7068 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7069 return hwcap;
7070 }
7071
7072 /* See linux-low.h. */
7073
7074 CORE_ADDR
7075 linux_get_hwcap2 (int pid, int wordsize)
7076 {
7077 CORE_ADDR hwcap2 = 0;
7078 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7079 return hwcap2;
7080 }
7081
7082 #ifdef HAVE_LINUX_REGSETS
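/* Count the regsets in INFO, whose array is terminated by an entry
with a negative size, and store the result in INFO->num_regsets.  */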
7083 void
7084 initialize_regsets_info (struct regsets_info *info)
7085 {
7086 for (info->num_regsets = 0;
7087 info->regsets[info->num_regsets].size >= 0;
7088 info->num_regsets++)
7089 ;
7090 }
7091 #endif
7092
7093 void
7094 initialize_low (void)
7095 {
7096 struct sigaction sigchld_action;
7097
7098 memset (&sigchld_action, 0, sizeof (sigchld_action));
7099 set_target_ops (the_linux_target);
7100
7101 linux_ptrace_init_warnings ();
7102 linux_proc_init_warnings ();
7103
7104 sigchld_action.sa_handler = sigchld_handler;
7105 sigemptyset (&sigchld_action.sa_mask);
7106 sigchld_action.sa_flags = SA_RESTART;
7107 sigaction (SIGCHLD, &sigchld_action, NULL);
7108
7109 initialize_low_arch ();
7110
7111 linux_check_ptrace_features ();
7112 }