gdb, gdbserver: detach fork child when detaching from fork parent
gdbserver/linux-low.cc
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2021 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifndef O_LARGEFILE
64 #define O_LARGEFILE 0
65 #endif
66
67 #ifndef AT_HWCAP2
68 #define AT_HWCAP2 26
69 #endif
70
71 /* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74 #if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77 #if defined(__mcoldfire__)
78 /* These are still undefined in 3.10 kernels. */
79 #define PT_TEXT_ADDR 49*4
80 #define PT_DATA_ADDR 50*4
81 #define PT_TEXT_END_ADDR 51*4
82 /* These are likewise still undefined in 3.10 kernels for TMS320C6X. */
83 #elif defined(__TMS320C6X__)
84 #define PT_TEXT_ADDR (0x10000*4)
85 #define PT_DATA_ADDR (0x10004*4)
86 #define PT_TEXT_END_ADDR (0x10008*4)
87 #endif
88 #endif
89
90 #if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95 #define SUPPORTS_READ_OFFSETS
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "gdbsupport/btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* See nat/linux-nat.h. */
180
181 int
182 lwp_is_stepping (struct lwp_info *lwp)
183 {
184 return lwp->stepping;
185 }
186
187 /* A list of all unknown processes which receive stop signals. Some
188 other process will presumably claim each of these as forked
189 children momentarily. */
190
191 struct simple_pid_list
192 {
193 /* The process ID. */
194 int pid;
195
196 /* The status as reported by waitpid. */
197 int status;
198
199 /* Next in chain. */
200 struct simple_pid_list *next;
201 };
202 static struct simple_pid_list *stopped_pids;
203
204 /* Trivial list manipulation functions to keep track of a list of new
205 stopped processes. */
206
207 static void
208 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
209 {
210 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
211
212 new_pid->pid = pid;
213 new_pid->status = status;
214 new_pid->next = *listp;
215 *listp = new_pid;
216 }
217
218 static int
219 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
220 {
221 struct simple_pid_list **p;
222
223 for (p = listp; *p != NULL; p = &(*p)->next)
224 if ((*p)->pid == pid)
225 {
226 struct simple_pid_list *next = (*p)->next;
227
228 *statusp = (*p)->status;
229 xfree (*p);
230 *p = next;
231 return 1;
232 }
233 return 0;
234 }
235
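/* A minimal usage sketch of the helpers above, with a hypothetical
   LWP id.  Stops reported for LWPs not yet in the LWP list are
   stashed on stopped_pids; handle_extended_wait below claims them
   when the parent's fork/clone event names the new child.  */

static void
example_stash_and_claim ()
{
  int status;

  /* A stop arrived for LWP 4242 before any fork/clone event named it.  */
  add_to_pid_list (&stopped_pids, 4242, W_STOPCODE (SIGSTOP));

  /* The parent's fork event then names 4242; claim the stashed status
     instead of blocking in waitpid again.  */
  if (pull_pid_from_list (&stopped_pids, 4242, &status))
    gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
}
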
236 enum stopping_threads_kind
237 {
238 /* Not stopping threads presently. */
239 NOT_STOPPING_THREADS,
240
241 /* Stopping threads. */
242 STOPPING_THREADS,
243
244 /* Stopping and suspending threads. */
245 STOPPING_AND_SUSPENDING_THREADS
246 };
247
248 /* This is set while stop_all_lwps is in effect. */
249 static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
250
251 /* FIXME make into a target method? */
252 int using_threads = 1;
253
254 /* True if we're presently stabilizing threads (moving them out of
255 jump pads). */
256 static int stabilizing_threads;
257
258 static void unsuspend_all_lwps (struct lwp_info *except);
259 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
260 static int lwp_is_marked_dead (struct lwp_info *lwp);
261 static int kill_lwp (unsigned long lwpid, int signo);
262 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
263 static int linux_low_ptrace_options (int attached);
264 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
265
266 /* When the event-loop is doing a step-over, this points at the thread
267 being stepped. */
268 static ptid_t step_over_bkpt;
269
270 bool
271 linux_process_target::low_supports_breakpoints ()
272 {
273 return false;
274 }
275
276 CORE_ADDR
277 linux_process_target::low_get_pc (regcache *regcache)
278 {
279 return 0;
280 }
281
282 void
283 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
284 {
285 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
286 }
287
288 std::vector<CORE_ADDR>
289 linux_process_target::low_get_next_pcs (regcache *regcache)
290 {
291 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
292 "implemented");
293 }
294
295 int
296 linux_process_target::low_decr_pc_after_break ()
297 {
298 return 0;
299 }
300
301 /* True if LWP is stopped in its stepping range. */
302
303 static int
304 lwp_in_step_range (struct lwp_info *lwp)
305 {
306 CORE_ADDR pc = lwp->stop_pc;
307
308 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
309 }
310
311 /* The read/write ends of the pipe registered as waitable file in the
312 event loop. */
313 static int linux_event_pipe[2] = { -1, -1 };
314
315 /* True if we're currently in async mode. */
316 #define target_is_async_p() (linux_event_pipe[0] != -1)
317
318 static void send_sigstop (struct lwp_info *lwp);
319
320 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and -1 if it is not ELF at all; set *MACHINE to the e_machine value (EM_NONE for non-ELF). */
321
322 static int
323 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
324 {
325 if (header->e_ident[EI_MAG0] == ELFMAG0
326 && header->e_ident[EI_MAG1] == ELFMAG1
327 && header->e_ident[EI_MAG2] == ELFMAG2
328 && header->e_ident[EI_MAG3] == ELFMAG3)
329 {
330 *machine = header->e_machine;
331 return header->e_ident[EI_CLASS] == ELFCLASS64;
332
333 }
334 *machine = EM_NONE;
335 return -1;
336 }
337
338 /* Return 1 if FILE is a 64-bit ELF file,
339 0 if it is a 32-bit ELF file or too short to read,
340 and -1 if the file is not accessible, doesn't exist, or is not ELF. */
341
342 static int
343 elf_64_file_p (const char *file, unsigned int *machine)
344 {
345 Elf64_Ehdr header;
346 int fd;
347
348 fd = open (file, O_RDONLY);
349 if (fd < 0)
350 return -1;
351
352 if (read (fd, &header, sizeof (header)) != sizeof (header))
353 {
354 close (fd);
355 return 0;
356 }
357 close (fd);
358
359 return elf_64_header_p (&header, machine);
360 }
361
362 /* Accept an integer PID; return true if the executable that PID
363 is running is a 64-bit ELF file. */
364
365 int
366 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
367 {
368 char file[PATH_MAX];
369
370 sprintf (file, "/proc/%d/exe", pid);
371 return elf_64_file_p (file, machine);
372 }
373
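/* A small usage sketch for the helper above, with a hypothetical PID.
   On a 64-bit x86 system running a 64-bit inferior this would set
   is_64 to 1 and machine to EM_X86_64.  */

static void
example_report_exe_class (int pid)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_64 < 0)
    warning ("cannot read /proc/%d/exe", pid);
  else if (debug_threads)
    debug_printf ("pid %d: 64-bit=%d, e_machine=%u\n", pid, is_64, machine);
}
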
374 void
375 linux_process_target::delete_lwp (lwp_info *lwp)
376 {
377 struct thread_info *thr = get_lwp_thread (lwp);
378
379 if (debug_threads)
380 debug_printf ("deleting %ld\n", lwpid_of (thr));
381
382 remove_thread (thr);
383
384 low_delete_thread (lwp->arch_private);
385
386 delete lwp;
387 }
388
389 void
390 linux_process_target::low_delete_thread (arch_lwp_info *info)
391 {
392 /* Default implementation should be overridden if architecture-specific
393 info is being used. */
394 gdb_assert (info == nullptr);
395 }
396
397 process_info *
398 linux_process_target::add_linux_process (int pid, int attached)
399 {
400 struct process_info *proc;
401
402 proc = add_process (pid, attached);
403 proc->priv = XCNEW (struct process_info_private);
404
405 proc->priv->arch_private = low_new_process ();
406
407 return proc;
408 }
409
410 arch_process_info *
411 linux_process_target::low_new_process ()
412 {
413 return nullptr;
414 }
415
416 void
417 linux_process_target::low_delete_process (arch_process_info *info)
418 {
419 /* Default implementation must be overridden if architecture-specific
420 info exists. */
421 gdb_assert (info == nullptr);
422 }
423
424 void
425 linux_process_target::low_new_fork (process_info *parent, process_info *child)
426 {
427 /* Nop. */
428 }
429
430 void
431 linux_process_target::arch_setup_thread (thread_info *thread)
432 {
433 struct thread_info *saved_thread;
434
435 saved_thread = current_thread;
436 current_thread = thread;
437
438 low_arch_setup ();
439
440 current_thread = saved_thread;
441 }
442
443 int
444 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
445 int wstat)
446 {
447 client_state &cs = get_client_state ();
448 struct lwp_info *event_lwp = *orig_event_lwp;
449 int event = linux_ptrace_get_extended_event (wstat);
450 struct thread_info *event_thr = get_lwp_thread (event_lwp);
451 struct lwp_info *new_lwp;
452
453 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
454
455 /* All extended events we currently use are mid-syscall. Only
456 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
457 you have to be using PTRACE_SEIZE to get that. */
458 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
459
460 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
461 || (event == PTRACE_EVENT_CLONE))
462 {
463 ptid_t ptid;
464 unsigned long new_pid;
465 int ret, status;
466
467 /* Get the pid of the new lwp. */
468 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
469 &new_pid);
470
471 /* If we haven't already seen the new PID stop, wait for it now. */
472 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
473 {
474 /* The new child has a pending SIGSTOP. We can't affect it until it
475 hits the SIGSTOP, but we're already attached. */
476
477 ret = my_waitpid (new_pid, &status, __WALL);
478
479 if (ret == -1)
480 perror_with_name ("waiting for new child");
481 else if (ret != new_pid)
482 warning ("wait returned unexpected PID %d", ret);
483 else if (!WIFSTOPPED (status))
484 warning ("wait returned unexpected status 0x%x", status);
485 }
486
487 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
488 {
489 struct process_info *parent_proc;
490 struct process_info *child_proc;
491 struct lwp_info *child_lwp;
492 struct thread_info *child_thr;
493
494 ptid = ptid_t (new_pid, new_pid);
495
496 if (debug_threads)
497 {
498 debug_printf ("HEW: Got fork event from LWP %ld, "
499 "new child is %d\n",
500 ptid_of (event_thr).lwp (),
501 ptid.pid ());
502 }
503
504 /* Add the new process to the tables and clone the breakpoint
505 lists of the parent. We need to do this even if the new process
506 will be detached, since we will need the process object and the
507 breakpoints to remove any breakpoints from memory when we
508 detach, and the client side will access registers. */
509 child_proc = add_linux_process (new_pid, 0);
510 gdb_assert (child_proc != NULL);
511 child_lwp = add_lwp (ptid);
512 gdb_assert (child_lwp != NULL);
513 child_lwp->stopped = 1;
514 child_lwp->must_set_ptrace_flags = 1;
515 child_lwp->status_pending_p = 0;
516 child_thr = get_lwp_thread (child_lwp);
517 child_thr->last_resume_kind = resume_stop;
518 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
519
520 /* If we're suspending all threads, leave this one suspended
521 too. If the fork/clone parent is stepping over a breakpoint,
522 all other threads have been suspended already. Leave the
523 child suspended too. */
524 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
525 || event_lwp->bp_reinsert != 0)
526 {
527 if (debug_threads)
528 debug_printf ("HEW: leaving child suspended\n");
529 child_lwp->suspended = 1;
530 }
531
532 parent_proc = get_thread_process (event_thr);
533 child_proc->attached = parent_proc->attached;
534
535 if (event_lwp->bp_reinsert != 0
536 && supports_software_single_step ()
537 && event == PTRACE_EVENT_VFORK)
538 {
539 /* If we leave single-step breakpoints there, child will
540 hit it, so uninsert single-step breakpoints from parent
541 (and child). Once vfork child is done, reinsert
542 them back to parent. */
543 uninsert_single_step_breakpoints (event_thr);
544 }
545
546 clone_all_breakpoints (child_thr, event_thr);
547
548 target_desc_up tdesc = allocate_target_description ();
549 copy_target_description (tdesc.get (), parent_proc->tdesc);
550 child_proc->tdesc = tdesc.release ();
551
552 /* Clone arch-specific process data. */
553 low_new_fork (parent_proc, child_proc);
554
555 /* Save fork info in the parent thread. */
556 if (event == PTRACE_EVENT_FORK)
557 event_lwp->waitstatus.set_forked (ptid);
558 else if (event == PTRACE_EVENT_VFORK)
559 event_lwp->waitstatus.set_vforked (ptid);
560
561 /* The status_pending field contains bits denoting the
562 extended event, so when the pending event is handled,
563 the handler will look at lwp->waitstatus. */
564 event_lwp->status_pending_p = 1;
565 event_lwp->status_pending = wstat;
566
567 /* Link the threads until the parent event is passed on to
568 higher layers. */
569 event_lwp->fork_relative = child_lwp;
570 child_lwp->fork_relative = event_lwp;
571
572 /* If the parent thread is doing step-over with single-step
573 breakpoints, the list of single-step breakpoints are cloned
574 from the parent's. Remove them from the child process.
575 In case of vfork, we'll reinsert them back once vforked
576 child is done. */
577 if (event_lwp->bp_reinsert != 0
578 && supports_software_single_step ())
579 {
580 /* The child process is forked and stopped, so it is safe
581 to access its memory without stopping all other threads
582 from other processes. */
583 delete_single_step_breakpoints (child_thr);
584
585 gdb_assert (has_single_step_breakpoints (event_thr));
586 gdb_assert (!has_single_step_breakpoints (child_thr));
587 }
588
589 /* Report the event. */
590 return 0;
591 }
592
593 if (debug_threads)
594 debug_printf ("HEW: Got clone event "
595 "from LWP %ld, new child is LWP %ld\n",
596 lwpid_of (event_thr), new_pid);
597
598 ptid = ptid_t (pid_of (event_thr), new_pid);
599 new_lwp = add_lwp (ptid);
600
601 /* Either we're going to immediately resume the new thread
602 or leave it stopped. resume_one_lwp is a nop if it
603 thinks the thread is currently running, so set this first
604 before calling resume_one_lwp. */
605 new_lwp->stopped = 1;
606
607 /* If we're suspending all threads, leave this one suspended
608 too. If the fork/clone parent is stepping over a breakpoint,
609 all other threads have been suspended already. Leave the
610 child suspended too. */
611 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
612 || event_lwp->bp_reinsert != 0)
613 new_lwp->suspended = 1;
614
615 /* Normally we will get the pending SIGSTOP. But in some cases
616 we might get another signal delivered to the group first.
617 If we do get another signal, be sure not to lose it. */
618 if (WSTOPSIG (status) != SIGSTOP)
619 {
620 new_lwp->stop_expected = 1;
621 new_lwp->status_pending_p = 1;
622 new_lwp->status_pending = status;
623 }
624 else if (cs.report_thread_events)
625 {
626 new_lwp->waitstatus.set_thread_created ();
627 new_lwp->status_pending_p = 1;
628 new_lwp->status_pending = status;
629 }
630
631 #ifdef USE_THREAD_DB
632 thread_db_notice_clone (event_thr, ptid);
633 #endif
634
635 /* Don't report the event. */
636 return 1;
637 }
638 else if (event == PTRACE_EVENT_VFORK_DONE)
639 {
640 event_lwp->waitstatus.set_vfork_done ();
641
642 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
643 {
644 reinsert_single_step_breakpoints (event_thr);
645
646 gdb_assert (has_single_step_breakpoints (event_thr));
647 }
648
649 /* Report the event. */
650 return 0;
651 }
652 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
653 {
654 struct process_info *proc;
655 std::vector<int> syscalls_to_catch;
656 ptid_t event_ptid;
657 pid_t event_pid;
658
659 if (debug_threads)
660 {
661 debug_printf ("HEW: Got exec event from LWP %ld\n",
662 lwpid_of (event_thr));
663 }
664
665 /* Get the event ptid. */
666 event_ptid = ptid_of (event_thr);
667 event_pid = event_ptid.pid ();
668
669 /* Save the syscall list from the execing process. */
670 proc = get_thread_process (event_thr);
671 syscalls_to_catch = std::move (proc->syscalls_to_catch);
672
673 /* Delete the execing process and all its threads. */
674 mourn (proc);
675 current_thread = NULL;
676
677 /* Create a new process/lwp/thread. */
678 proc = add_linux_process (event_pid, 0);
679 event_lwp = add_lwp (event_ptid);
680 event_thr = get_lwp_thread (event_lwp);
681 gdb_assert (current_thread == event_thr);
682 arch_setup_thread (event_thr);
683
684 /* Set the event status. */
685 event_lwp->waitstatus.set_execd
686 (make_unique_xstrdup
687 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
688
689 /* Mark the exec status as pending. */
690 event_lwp->stopped = 1;
691 event_lwp->status_pending_p = 1;
692 event_lwp->status_pending = wstat;
693 event_thr->last_resume_kind = resume_continue;
694 event_thr->last_status.set_ignore ();
695
696 /* Update syscall state in the new lwp, effectively mid-syscall too. */
697 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
698
699 /* Restore the list to catch. Don't rely on the client, which is free
700 to avoid sending a new list when the architecture doesn't change.
701 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
702 proc->syscalls_to_catch = std::move (syscalls_to_catch);
703
704 /* Report the event. */
705 *orig_event_lwp = event_lwp;
706 return 0;
707 }
708
709 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
710 }
711
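/* A standalone sketch of the extended-event decoding used above,
   with simplified error handling: linux_ptrace_get_extended_event
   amounts to (wstat >> 16), and PTRACE_GETEVENTMSG yields the new
   child's pid for fork/vfork/clone events.  */

static pid_t
example_get_forked_child (pid_t parent, int wstat)
{
  unsigned long new_pid = 0;

  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && (wstat >> 16) == PTRACE_EVENT_FORK)
    ptrace (PTRACE_GETEVENTMSG, parent, (PTRACE_TYPE_ARG3) 0, &new_pid);

  return (pid_t) new_pid;
}
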
712 CORE_ADDR
713 linux_process_target::get_pc (lwp_info *lwp)
714 {
715 struct thread_info *saved_thread;
716 struct regcache *regcache;
717 CORE_ADDR pc;
718
719 if (!low_supports_breakpoints ())
720 return 0;
721
722 saved_thread = current_thread;
723 current_thread = get_lwp_thread (lwp);
724
725 regcache = get_thread_regcache (current_thread, 1);
726 pc = low_get_pc (regcache);
727
728 if (debug_threads)
729 debug_printf ("pc is 0x%lx\n", (long) pc);
730
731 current_thread = saved_thread;
732 return pc;
733 }
734
735 void
736 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
737 {
738 struct thread_info *saved_thread;
739 struct regcache *regcache;
740
741 saved_thread = current_thread;
742 current_thread = get_lwp_thread (lwp);
743
744 regcache = get_thread_regcache (current_thread, 1);
745 low_get_syscall_trapinfo (regcache, sysno);
746
747 if (debug_threads)
748 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
749
750 current_thread = saved_thread;
751 }
752
753 void
754 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
755 {
756 /* By default, report an unknown system call number. */
757 *sysno = UNKNOWN_SYSCALL;
758 }
759
760 bool
761 linux_process_target::save_stop_reason (lwp_info *lwp)
762 {
763 CORE_ADDR pc;
764 CORE_ADDR sw_breakpoint_pc;
765 struct thread_info *saved_thread;
766 #if USE_SIGTRAP_SIGINFO
767 siginfo_t siginfo;
768 #endif
769
770 if (!low_supports_breakpoints ())
771 return false;
772
773 pc = get_pc (lwp);
774 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
775
776 /* breakpoint_at reads from the current thread. */
777 saved_thread = current_thread;
778 current_thread = get_lwp_thread (lwp);
779
780 #if USE_SIGTRAP_SIGINFO
781 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
782 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
783 {
784 if (siginfo.si_signo == SIGTRAP)
785 {
786 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
787 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
788 {
789 /* The si_code is ambiguous on this arch -- check debug
790 registers. */
791 if (!check_stopped_by_watchpoint (lwp))
792 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
793 }
794 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
795 {
796 /* If we determine the LWP stopped for a SW breakpoint,
797 trust it. Particularly don't check watchpoint
798 registers, because at least on s390, we'd find
799 stopped-by-watchpoint as long as there's a watchpoint
800 set. */
801 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
802 }
803 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
804 {
805 /* This can indicate either a hardware breakpoint or
806 hardware watchpoint. Check debug registers. */
807 if (!check_stopped_by_watchpoint (lwp))
808 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
809 }
810 else if (siginfo.si_code == TRAP_TRACE)
811 {
812 /* We may have single stepped an instruction that
813 triggered a watchpoint. In that case, on some
814 architectures (such as x86), instead of TRAP_HWBKPT,
815 si_code indicates TRAP_TRACE, and we need to check
816 the debug registers separately. */
817 if (!check_stopped_by_watchpoint (lwp))
818 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
819 }
820 }
821 }
822 #else
823 /* We may have just stepped a breakpoint instruction. E.g., in
824 non-stop mode, GDB first tells the thread A to step a range, and
825 then the user inserts a breakpoint inside the range. In that
826 case we need to report the breakpoint PC. */
827 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
828 && low_breakpoint_at (sw_breakpoint_pc))
829 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
830
831 if (hardware_breakpoint_inserted_here (pc))
832 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
833
834 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
835 check_stopped_by_watchpoint (lwp);
836 #endif
837
838 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
839 {
840 if (debug_threads)
841 {
842 struct thread_info *thr = get_lwp_thread (lwp);
843
844 debug_printf ("CSBB: %s stopped by software breakpoint\n",
845 target_pid_to_str (ptid_of (thr)).c_str ());
846 }
847
848 /* Back up the PC if necessary. */
849 if (pc != sw_breakpoint_pc)
850 {
851 struct regcache *regcache
852 = get_thread_regcache (current_thread, 1);
853 low_set_pc (regcache, sw_breakpoint_pc);
854 }
855
856 /* Update this so we record the correct stop PC below. */
857 pc = sw_breakpoint_pc;
858 }
859 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
860 {
861 if (debug_threads)
862 {
863 struct thread_info *thr = get_lwp_thread (lwp);
864
865 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
866 target_pid_to_str (ptid_of (thr)).c_str ());
867 }
868 }
869 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
870 {
871 if (debug_threads)
872 {
873 struct thread_info *thr = get_lwp_thread (lwp);
874
875 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
876 target_pid_to_str (ptid_of (thr)).c_str ());
877 }
878 }
879 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
880 {
881 if (debug_threads)
882 {
883 struct thread_info *thr = get_lwp_thread (lwp);
884
885 debug_printf ("CSBB: %s stopped by trace\n",
886 target_pid_to_str (ptid_of (thr)).c_str ());
887 }
888 }
889
890 lwp->stop_pc = pc;
891 current_thread = saved_thread;
892 return true;
893 }
894
895 lwp_info *
896 linux_process_target::add_lwp (ptid_t ptid)
897 {
898 lwp_info *lwp = new lwp_info;
899
900 lwp->thread = add_thread (ptid, lwp);
901
902 low_new_thread (lwp);
903
904 return lwp;
905 }
906
907 void
908 linux_process_target::low_new_thread (lwp_info *info)
909 {
910 /* Nop. */
911 }
912
913 /* Callback to be used when calling fork_inferior, responsible for
914 actually initiating the tracing of the inferior. */
915
916 static void
917 linux_ptrace_fun ()
918 {
919 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
920 (PTRACE_TYPE_ARG4) 0) < 0)
921 trace_start_error_with_name ("ptrace");
922
923 if (setpgid (0, 0) < 0)
924 trace_start_error_with_name ("setpgid");
925
926 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
927 stdout to stderr so that inferior i/o doesn't corrupt the connection.
928 Also, redirect stdin to /dev/null. */
929 if (remote_connection_is_stdio ())
930 {
931 if (close (0) < 0)
932 trace_start_error_with_name ("close");
933 if (open ("/dev/null", O_RDONLY) < 0)
934 trace_start_error_with_name ("open");
935 if (dup2 (2, 1) < 0)
936 trace_start_error_with_name ("dup2");
937 if (write (2, "stdin/stdout redirected\n",
938 sizeof ("stdin/stdout redirected\n") - 1) < 0)
939 {
940 /* Errors ignored. */;
941 }
942 }
943 }
944
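/* A simplified sketch of what fork_inferior does with the callback
   above (the real implementation, in nat/fork-inferior.c, adds error
   handling and shell support): the child calls linux_ptrace_fun and
   then execs, stopping with SIGTRAP at the exec.  */

static pid_t
example_traced_fork (const char *program, char **argv, char **envp)
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      linux_ptrace_fun ();		/* PTRACE_TRACEME + setpgid.  */
      execve (program, argv, envp);
      _exit (127);			/* Only reached if exec failed.  */
    }
  return pid;
}
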
945 /* Start an inferior process and return its pid.
946 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
947 are its arguments. */
948
949 int
950 linux_process_target::create_inferior (const char *program,
951 const std::vector<char *> &program_args)
952 {
953 client_state &cs = get_client_state ();
954 struct lwp_info *new_lwp;
955 int pid;
956 ptid_t ptid;
957
958 {
959 maybe_disable_address_space_randomization restore_personality
960 (cs.disable_randomization);
961 std::string str_program_args = construct_inferior_arguments (program_args);
962
963 pid = fork_inferior (program,
964 str_program_args.c_str (),
965 get_environ ()->envp (), linux_ptrace_fun,
966 NULL, NULL, NULL, NULL);
967 }
968
969 add_linux_process (pid, 0);
970
971 ptid = ptid_t (pid, pid);
972 new_lwp = add_lwp (ptid);
973 new_lwp->must_set_ptrace_flags = 1;
974
975 post_fork_inferior (pid, program);
976
977 return pid;
978 }
979
980 /* Implement the post_create_inferior target_ops method. */
981
982 void
983 linux_process_target::post_create_inferior ()
984 {
985 struct lwp_info *lwp = get_thread_lwp (current_thread);
986
987 low_arch_setup ();
988
989 if (lwp->must_set_ptrace_flags)
990 {
991 struct process_info *proc = current_process ();
992 int options = linux_low_ptrace_options (proc->attached);
993
994 linux_enable_event_reporting (lwpid_of (current_thread), options);
995 lwp->must_set_ptrace_flags = 0;
996 }
997 }
998
999 int
1000 linux_process_target::attach_lwp (ptid_t ptid)
1001 {
1002 struct lwp_info *new_lwp;
1003 int lwpid = ptid.lwp ();
1004
1005 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1006 != 0)
1007 return errno;
1008
1009 new_lwp = add_lwp (ptid);
1010
1011 /* We need to wait for SIGSTOP before being able to make the next
1012 ptrace call on this LWP. */
1013 new_lwp->must_set_ptrace_flags = 1;
1014
1015 if (linux_proc_pid_is_stopped (lwpid))
1016 {
1017 if (debug_threads)
1018 debug_printf ("Attached to a stopped process\n");
1019
1020 /* The process is definitely stopped. It is in a job control
1021 stop, unless the kernel predates the TASK_STOPPED /
1022 TASK_TRACED distinction, in which case it might be in a
1023 ptrace stop. Make sure it is in a ptrace stop; from there we
1024 can kill it, signal it, et cetera.
1025
1026 First make sure there is a pending SIGSTOP. Since we are
1027 already attached, the process can not transition from stopped
1028 to running without a PTRACE_CONT; so we know this signal will
1029 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1030 probably already in the queue (unless this kernel is old
1031 enough to use TASK_STOPPED for ptrace stops); but since
1032 SIGSTOP is not an RT signal, it can only be queued once. */
1033 kill_lwp (lwpid, SIGSTOP);
1034
1035 /* Finally, resume the stopped process. This will deliver the
1036 SIGSTOP (or a higher priority signal, just like normal
1037 PTRACE_ATTACH), which we'll catch later on. */
1038 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1039 }
1040
1041 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1042 brings it to a halt.
1043
1044 There are several cases to consider here:
1045
1046 1) gdbserver has already attached to the process and is being notified
1047 of a new thread that is being created.
1048 In this case we should ignore that SIGSTOP and resume the
1049 process. This is handled below by setting stop_expected = 1,
1050 and the fact that add_thread sets last_resume_kind ==
1051 resume_continue.
1052
1053 2) This is the first thread (the process thread), and we're attaching
1054 to it via attach_inferior.
1055 In this case we want the process thread to stop.
1056 This is handled by having linux_attach set last_resume_kind ==
1057 resume_stop after we return.
1058
1059 If the pid we are attaching to is also the tgid, we attach to and
1060 stop all the existing threads. Otherwise, we attach to pid and
1061 ignore any other threads in the same group as this pid.
1062
1063 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1064 existing threads.
1065 In this case we want the thread to stop.
1066 FIXME: This case is currently not properly handled.
1067 We should wait for the SIGSTOP but don't. Things work apparently
1068 because enough time passes between when we ptrace (ATTACH) and when
1069 gdb makes the next ptrace call on the thread.
1070
1071 On the other hand, if we are currently trying to stop all threads, we
1072 should treat the new thread as if we had sent it a SIGSTOP. This works
1073 because we are guaranteed that the add_lwp call above added us to the
1074 end of the list, and so the new thread has not yet reached
1075 wait_for_sigstop (but will). */
1076 new_lwp->stop_expected = 1;
1077
1078 return 0;
1079 }
1080
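/* kill_lwp is forward-declared above and defined later in this file;
   a minimal sketch of its tkill-based core, assuming __NR_tkill is
   available (the real version also diagnoses its absence): signal
   one specific thread rather than the whole thread group, which
   matters for programs using raw clone.  */

static int
example_kill_lwp (unsigned long lwpid, int signo)
{
  errno = 0;
  return syscall (__NR_tkill, lwpid, signo);
}
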
1081 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1082 already attached. Return 1 if a new LWP was found, 0
1083 otherwise. */
1084
1085 static int
1086 attach_proc_task_lwp_callback (ptid_t ptid)
1087 {
1088 /* Is this a new thread? */
1089 if (find_thread_ptid (ptid) == NULL)
1090 {
1091 int lwpid = ptid.lwp ();
1092 int err;
1093
1094 if (debug_threads)
1095 debug_printf ("Found new lwp %d\n", lwpid);
1096
1097 err = the_linux_target->attach_lwp (ptid);
1098
1099 /* Be quiet if we simply raced with the thread exiting. EPERM
1100 is returned if the thread's task still exists, and is marked
1101 as exited or zombie, as well as other conditions, so in that
1102 case, confirm the status in /proc/PID/status. */
1103 if (err == ESRCH
1104 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1105 {
1106 if (debug_threads)
1107 {
1108 debug_printf ("Cannot attach to lwp %d: "
1109 "thread is gone (%d: %s)\n",
1110 lwpid, err, safe_strerror (err));
1111 }
1112 }
1113 else if (err != 0)
1114 {
1115 std::string reason
1116 = linux_ptrace_attach_fail_reason_string (ptid, err);
1117
1118 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1119 }
1120
1121 return 1;
1122 }
1123 return 0;
1124 }
1125
1126 static void async_file_mark (void);
1127
1128 /* Attach to PID. If PID is the tgid, attach to it and all
1129 of its threads. */
1130
1131 int
1132 linux_process_target::attach (unsigned long pid)
1133 {
1134 struct process_info *proc;
1135 struct thread_info *initial_thread;
1136 ptid_t ptid = ptid_t (pid, pid);
1137 int err;
1138
1139 proc = add_linux_process (pid, 1);
1140
1141 /* Attach to PID. We will check for other threads
1142 soon. */
1143 err = attach_lwp (ptid);
1144 if (err != 0)
1145 {
1146 remove_process (proc);
1147
1148 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1149 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1150 }
1151
1152 /* Don't ignore the initial SIGSTOP if we just attached to this
1153 process. It will be collected by wait shortly. */
1154 initial_thread = find_thread_ptid (ptid_t (pid, pid));
1155 initial_thread->last_resume_kind = resume_stop;
1156
1157 /* We must attach to every LWP. If /proc is mounted, use that to
1158 find them now. On the one hand, the inferior may be using raw
1159 clone instead of using pthreads. On the other hand, even if it
1160 is using pthreads, GDB may not be connected yet (thread_db needs
1161 to do symbol lookups, through qSymbol). Also, thread_db walks
1162 structures in the inferior's address space to find the list of
1163 threads/LWPs, and those structures may well be corrupted. Note
1164 that once thread_db is loaded, we'll still use it to list threads
1165 and associate pthread info with each LWP. */
1166 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1167
1168 /* GDB will shortly read the xml target description for this
1169 process, to figure out the process' architecture. But the target
1170 description is only filled in when the first process/thread in
1171 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1172 that now, otherwise, if GDB is fast enough, it could read the
1173 target description _before_ that initial stop. */
1174 if (non_stop)
1175 {
1176 struct lwp_info *lwp;
1177 int wstat, lwpid;
1178 ptid_t pid_ptid = ptid_t (pid);
1179
1180 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1181 gdb_assert (lwpid > 0);
1182
1183 lwp = find_lwp_pid (ptid_t (lwpid));
1184
1185 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1186 {
1187 lwp->status_pending_p = 1;
1188 lwp->status_pending = wstat;
1189 }
1190
1191 initial_thread->last_resume_kind = resume_continue;
1192
1193 async_file_mark ();
1194
1195 gdb_assert (proc->tdesc != NULL);
1196 }
1197
1198 return 0;
1199 }
1200
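/* A rough sketch of the /proc scan that linux_proc_attach_tgid_threads
   performs for the code above; the real implementation (in
   nat/linux-procfs.c) also re-scans until no new threads appear, to
   cope with races against thread creation.  */

static void
example_scan_proc_task (int pid)
{
  char dirname[64];
  xsnprintf (dirname, sizeof (dirname), "/proc/%d/task", pid);

  DIR *dir = opendir (dirname);
  if (dir == NULL)
    return;

  struct dirent *dp;
  while ((dp = readdir (dir)) != NULL)
    {
      unsigned long lwp;

      if (sscanf (dp->d_name, "%lu", &lwp) == 1)
	attach_proc_task_lwp_callback (ptid_t (pid, lwp));
    }
  closedir (dir);
}
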
1201 static int
1202 last_thread_of_process_p (int pid)
1203 {
1204 bool seen_one = false;
1205
1206 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1207 {
1208 if (!seen_one)
1209 {
1210 /* This is the first thread of this process we see. */
1211 seen_one = true;
1212 return false;
1213 }
1214 else
1215 {
1216 /* This is the second thread of this process we see. */
1217 return true;
1218 }
1219 });
1220
1221 return thread == NULL;
1222 }
1223
1224 /* Kill LWP. */
1225
1226 static void
1227 linux_kill_one_lwp (struct lwp_info *lwp)
1228 {
1229 struct thread_info *thr = get_lwp_thread (lwp);
1230 int pid = lwpid_of (thr);
1231
1232 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1233 there is no signal context, and ptrace(PTRACE_KILL) (or
1234 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1235 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1236 alternative is to kill with SIGKILL. We only need one SIGKILL
1237 per process, not one for each thread. But since we still
1238 support debugging programs using raw clone without CLONE_THREAD,
1239 we send one for each thread. For years, we used PTRACE_KILL
1240 only, so we're being a bit paranoid about some old kernels where
1241 PTRACE_KILL might work better (dubious if there are any such, but
1242 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1243 second, and so we're fine everywhere. */
1244
1245 errno = 0;
1246 kill_lwp (pid, SIGKILL);
1247 if (debug_threads)
1248 {
1249 int save_errno = errno;
1250
1251 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1252 target_pid_to_str (ptid_of (thr)).c_str (),
1253 save_errno ? safe_strerror (save_errno) : "OK");
1254 }
1255
1256 errno = 0;
1257 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1258 if (debug_threads)
1259 {
1260 int save_errno = errno;
1261
1262 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1263 target_pid_to_str (ptid_of (thr)).c_str (),
1264 save_errno ? safe_strerror (save_errno) : "OK");
1265 }
1266 }
1267
1268 /* Kill LWP and wait for it to die. */
1269
1270 static void
1271 kill_wait_lwp (struct lwp_info *lwp)
1272 {
1273 struct thread_info *thr = get_lwp_thread (lwp);
1274 int pid = ptid_of (thr).pid ();
1275 int lwpid = ptid_of (thr).lwp ();
1276 int wstat;
1277 int res;
1278
1279 if (debug_threads)
1280 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1281
1282 do
1283 {
1284 linux_kill_one_lwp (lwp);
1285
1286 /* Make sure it died. Notes:
1287
1288 - The loop is most likely unnecessary.
1289
1290 - We don't use wait_for_event as that could delete lwps
1291 while we're iterating over them. We're not interested in
1292 any pending status at this point, only in making sure all
1293 wait status on the kernel side are collected until the
1294 process is reaped.
1295
1296 - We don't use __WALL here as the __WALL emulation relies on
1297 SIGCHLD, and killing a stopped process doesn't generate
1298 one, nor an exit status.
1299 */
1300 res = my_waitpid (lwpid, &wstat, 0);
1301 if (res == -1 && errno == ECHILD)
1302 res = my_waitpid (lwpid, &wstat, __WCLONE);
1303 } while (res > 0 && WIFSTOPPED (wstat));
1304
1305 /* Even if it was stopped, the child may have already disappeared.
1306 E.g., if it was killed by SIGKILL. */
1307 if (res < 0 && errno != ECHILD)
1308 perror_with_name ("kill_wait_lwp");
1309 }
1310
1311 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1312 except the leader. */
1313
1314 static void
1315 kill_one_lwp_callback (thread_info *thread, int pid)
1316 {
1317 struct lwp_info *lwp = get_thread_lwp (thread);
1318
1319 /* We avoid killing the first thread here, because of a Linux kernel (at
1320 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1321 the children get a chance to be reaped, it will remain a zombie
1322 forever. */
1323
1324 if (lwpid_of (thread) == pid)
1325 {
1326 if (debug_threads)
1327 debug_printf ("lkop: is last of process %s\n",
1328 target_pid_to_str (thread->id).c_str ());
1329 return;
1330 }
1331
1332 kill_wait_lwp (lwp);
1333 }
1334
1335 int
1336 linux_process_target::kill (process_info *process)
1337 {
1338 int pid = process->pid;
1339
1340 /* If we're killing a running inferior, make sure it is stopped
1341 first, as PTRACE_KILL will not work otherwise. */
1342 stop_all_lwps (0, NULL);
1343
1344 for_each_thread (pid, [&] (thread_info *thread)
1345 {
1346 kill_one_lwp_callback (thread, pid);
1347 });
1348
1349 /* See the comment in linux_kill_one_lwp. We did not kill the first
1350 thread in the list, so do so now. */
1351 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1352
1353 if (lwp == NULL)
1354 {
1355 if (debug_threads)
1356 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1357 pid);
1358 }
1359 else
1360 kill_wait_lwp (lwp);
1361
1362 mourn (process);
1363
1364 /* Since we presently can only stop all lwps of all processes, we
1365 need to unstop lwps of other processes. */
1366 unstop_all_lwps (0, NULL);
1367 return 0;
1368 }
1369
1370 /* Get pending signal of THREAD, for detaching purposes. This is the
1371 signal the thread last stopped for, which we need to deliver to the
1372 thread when detaching; otherwise it'd be suppressed/lost. */
1373
1374 static int
1375 get_detach_signal (struct thread_info *thread)
1376 {
1377 client_state &cs = get_client_state ();
1378 enum gdb_signal signo = GDB_SIGNAL_0;
1379 int status;
1380 struct lwp_info *lp = get_thread_lwp (thread);
1381
1382 if (lp->status_pending_p)
1383 status = lp->status_pending;
1384 else
1385 {
1386 /* If the thread had been suspended by gdbserver, and it stopped
1387 cleanly, then it'll have stopped with SIGSTOP. But we don't
1388 want to deliver that SIGSTOP. */
1389 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1390 || thread->last_status.sig () == GDB_SIGNAL_0)
1391 return 0;
1392
1393 /* Otherwise, we may need to deliver the signal we
1394 intercepted. */
1395 status = lp->last_status;
1396 }
1397
1398 if (!WIFSTOPPED (status))
1399 {
1400 if (debug_threads)
1401 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1402 target_pid_to_str (ptid_of (thread)).c_str ());
1403 return 0;
1404 }
1405
1406 /* Extended wait statuses aren't real SIGTRAPs. */
1407 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1408 {
1409 if (debug_threads)
1410 debug_printf ("GPS: lwp %s had stopped with extended "
1411 "status: no pending signal\n",
1412 target_pid_to_str (ptid_of (thread)).c_str ());
1413 return 0;
1414 }
1415
1416 signo = gdb_signal_from_host (WSTOPSIG (status));
1417
1418 if (cs.program_signals_p && !cs.program_signals[signo])
1419 {
1420 if (debug_threads)
1421 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1422 target_pid_to_str (ptid_of (thread)).c_str (),
1423 gdb_signal_to_string (signo));
1424 return 0;
1425 }
1426 else if (!cs.program_signals_p
1427 /* If we have no way to know which signals GDB does not
1428 want to have passed to the program, assume
1429 SIGTRAP/SIGINT, which is GDB's default. */
1430 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1431 {
1432 if (debug_threads)
1433 debug_printf ("GPS: lwp %s had signal %s, "
1434 "but we don't know if we should pass it. "
1435 "Default to not.\n",
1436 target_pid_to_str (ptid_of (thread)).c_str (),
1437 gdb_signal_to_string (signo));
1438 return 0;
1439 }
1440 else
1441 {
1442 if (debug_threads)
1443 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1444 target_pid_to_str (ptid_of (thread)).c_str (),
1445 gdb_signal_to_string (signo));
1446
1447 return WSTOPSIG (status);
1448 }
1449 }
1450
1451 void
1452 linux_process_target::detach_one_lwp (lwp_info *lwp)
1453 {
1454 struct thread_info *thread = get_lwp_thread (lwp);
1455 int sig;
1456 int lwpid;
1457
1458 /* If there is a pending SIGSTOP, get rid of it. */
1459 if (lwp->stop_expected)
1460 {
1461 if (debug_threads)
1462 debug_printf ("Sending SIGCONT to %s\n",
1463 target_pid_to_str (ptid_of (thread)).c_str ());
1464
1465 kill_lwp (lwpid_of (thread), SIGCONT);
1466 lwp->stop_expected = 0;
1467 }
1468
1469 /* Pass on any pending signal for this thread. */
1470 sig = get_detach_signal (thread);
1471
1472 /* Preparing to resume may try to write registers, and fail if the
1473 lwp is zombie. If that happens, ignore the error. We'll handle
1474 it below, when detach fails with ESRCH. */
1475 try
1476 {
1477 /* Flush any pending changes to the process's registers. */
1478 regcache_invalidate_thread (thread);
1479
1480 /* Finally, let it resume. */
1481 low_prepare_to_resume (lwp);
1482 }
1483 catch (const gdb_exception_error &ex)
1484 {
1485 if (!check_ptrace_stopped_lwp_gone (lwp))
1486 throw;
1487 }
1488
1489 lwpid = lwpid_of (thread);
1490 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1491 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1492 {
1493 int save_errno = errno;
1494
1495 /* We know the thread exists, so ESRCH must mean the lwp is
1496 zombie. This can happen if one of the already-detached
1497 threads exits the whole thread group. In that case we're
1498 still attached, and must reap the lwp. */
1499 if (save_errno == ESRCH)
1500 {
1501 int ret, status;
1502
1503 ret = my_waitpid (lwpid, &status, __WALL);
1504 if (ret == -1)
1505 {
1506 warning (_("Couldn't reap LWP %d while detaching: %s"),
1507 lwpid, safe_strerror (errno));
1508 }
1509 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1510 {
1511 warning (_("Reaping LWP %d while detaching "
1512 "returned unexpected status 0x%x"),
1513 lwpid, status);
1514 }
1515 }
1516 else
1517 {
1518 error (_("Can't detach %s: %s"),
1519 target_pid_to_str (ptid_of (thread)).c_str (),
1520 safe_strerror (save_errno));
1521 }
1522 }
1523 else if (debug_threads)
1524 {
1525 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1526 target_pid_to_str (ptid_of (thread)).c_str (),
1527 strsignal (sig));
1528 }
1529
1530 delete_lwp (lwp);
1531 }
1532
1533 int
1534 linux_process_target::detach (process_info *process)
1535 {
1536 struct lwp_info *main_lwp;
1537
1538 /* If a step over is already in progress, let it finish first;
1539 otherwise, nesting a stabilize_threads operation on top gets real
1540 messy. */
1541 complete_ongoing_step_over ();
1542
1543 /* Stop all threads before detaching. First, ptrace requires that
1544 the thread is stopped to successfully detach. Second, thread_db
1545 may need to uninstall thread event breakpoints from memory, which
1546 only works with a stopped process anyway. */
1547 stop_all_lwps (0, NULL);
1548
1549 #ifdef USE_THREAD_DB
1550 thread_db_detach (process);
1551 #endif
1552
1553 /* Stabilize threads (move out of jump pads). */
1554 target_stabilize_threads ();
1555
1556 /* Detach from the clone lwps first. If the thread group exits just
1557 while we're detaching, we must reap the clone lwps before we're
1558 able to reap the leader. */
1559 for_each_thread (process->pid, [this] (thread_info *thread)
1560 {
1561 /* We don't actually detach from the thread group leader just yet.
1562 If the thread group exits, we must reap the zombie clone lwps
1563 before we're able to reap the leader. */
1564 if (thread->id.pid () == thread->id.lwp ())
1565 return;
1566
1567 lwp_info *lwp = get_thread_lwp (thread);
1568 detach_one_lwp (lwp);
1569 });
1570
1571 main_lwp = find_lwp_pid (ptid_t (process->pid));
1572 detach_one_lwp (main_lwp);
1573
1574 mourn (process);
1575
1576 /* Since we presently can only stop all lwps of all processes, we
1577 need to unstop lwps of other processes. */
1578 unstop_all_lwps (0, NULL);
1579 return 0;
1580 }
1581
1582 /* Remove all LWPs that belong to process PROC from the lwp list, free PROC's private data, and remove PROC itself. */
1583
1584 void
1585 linux_process_target::mourn (process_info *process)
1586 {
1587 struct process_info_private *priv;
1588
1589 #ifdef USE_THREAD_DB
1590 thread_db_mourn (process);
1591 #endif
1592
1593 for_each_thread (process->pid, [this] (thread_info *thread)
1594 {
1595 delete_lwp (get_thread_lwp (thread));
1596 });
1597
1598 /* Free all private data. */
1599 priv = process->priv;
1600 low_delete_process (priv->arch_private);
1601 free (priv);
1602 process->priv = NULL;
1603
1604 remove_process (process);
1605 }
1606
1607 void
1608 linux_process_target::join (int pid)
1609 {
1610 int status, ret;
1611
1612 do {
1613 ret = my_waitpid (pid, &status, 0);
1614 if (WIFEXITED (status) || WIFSIGNALED (status))
1615 break;
1616 } while (ret != -1 || errno != ECHILD);
1617 }
1618
1619 /* Return true if the given thread is still alive. */
1620
1621 bool
1622 linux_process_target::thread_alive (ptid_t ptid)
1623 {
1624 struct lwp_info *lwp = find_lwp_pid (ptid);
1625
1626 /* We assume we always know if a thread exits. If a whole process
1627 exited but we still haven't been able to report it to GDB, we'll
1628 hold on to the last lwp of the dead process. */
1629 if (lwp != NULL)
1630 return !lwp_is_marked_dead (lwp);
1631 else
1632 return 0;
1633 }
1634
1635 bool
1636 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1637 {
1638 struct lwp_info *lp = get_thread_lwp (thread);
1639
1640 if (!lp->status_pending_p)
1641 return 0;
1642
1643 if (thread->last_resume_kind != resume_stop
1644 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1645 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1646 {
1647 struct thread_info *saved_thread;
1648 CORE_ADDR pc;
1649 int discard = 0;
1650
1651 gdb_assert (lp->last_status != 0);
1652
1653 pc = get_pc (lp);
1654
1655 saved_thread = current_thread;
1656 current_thread = thread;
1657
1658 if (pc != lp->stop_pc)
1659 {
1660 if (debug_threads)
1661 debug_printf ("PC of %ld changed\n",
1662 lwpid_of (thread));
1663 discard = 1;
1664 }
1665
1666 #if !USE_SIGTRAP_SIGINFO
1667 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1668 && !low_breakpoint_at (pc))
1669 {
1670 if (debug_threads)
1671 debug_printf ("previous SW breakpoint of %ld gone\n",
1672 lwpid_of (thread));
1673 discard = 1;
1674 }
1675 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1676 && !hardware_breakpoint_inserted_here (pc))
1677 {
1678 if (debug_threads)
1679 debug_printf ("previous HW breakpoint of %ld gone\n",
1680 lwpid_of (thread));
1681 discard = 1;
1682 }
1683 #endif
1684
1685 current_thread = saved_thread;
1686
1687 if (discard)
1688 {
1689 if (debug_threads)
1690 debug_printf ("discarding pending breakpoint status\n");
1691 lp->status_pending_p = 0;
1692 return 0;
1693 }
1694 }
1695
1696 return 1;
1697 }
1698
1699 /* Returns true if LWP is resumed from the client's perspective. */
1700
1701 static int
1702 lwp_resumed (struct lwp_info *lwp)
1703 {
1704 struct thread_info *thread = get_lwp_thread (lwp);
1705
1706 if (thread->last_resume_kind != resume_stop)
1707 return 1;
1708
1709 /* Did gdb send us a `vCont;t', but we haven't reported the
1710 corresponding stop to gdb yet? If so, the thread is still
1711 resumed/running from gdb's perspective. */
1712 if (thread->last_resume_kind == resume_stop
1713 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1714 return 1;
1715
1716 return 0;
1717 }
1718
1719 bool
1720 linux_process_target::status_pending_p_callback (thread_info *thread,
1721 ptid_t ptid)
1722 {
1723 struct lwp_info *lp = get_thread_lwp (thread);
1724
1725 /* Check if we're only interested in events from a specific process
1726 or a specific LWP. */
1727 if (!thread->id.matches (ptid))
1728 return 0;
1729
1730 if (!lwp_resumed (lp))
1731 return 0;
1732
1733 if (lp->status_pending_p
1734 && !thread_still_has_status_pending (thread))
1735 {
1736 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1737 return 0;
1738 }
1739
1740 return lp->status_pending_p;
1741 }
1742
1743 struct lwp_info *
1744 find_lwp_pid (ptid_t ptid)
1745 {
1746 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1747 {
1748 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1749 return thr_arg->id.lwp () == lwp;
1750 });
1751
1752 if (thread == NULL)
1753 return NULL;
1754
1755 return get_thread_lwp (thread);
1756 }
1757
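/* Usage sketch: both lookups below find the same LWP, since
   find_lwp_pid falls back to the pid when the ptid carries no lwp
   component.  The pid is hypothetical.  */

static void
example_find_lwp ()
{
  struct lwp_info *a = find_lwp_pid (ptid_t (1234));	     /* pid only.  */
  struct lwp_info *b = find_lwp_pid (ptid_t (1234, 1234));  /* pid + lwp.  */

  gdb_assert (a == b);
}
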
1758 /* Return the number of known LWPs in the tgid given by PID. */
1759
1760 static int
1761 num_lwps (int pid)
1762 {
1763 int count = 0;
1764
1765 for_each_thread (pid, [&] (thread_info *thread)
1766 {
1767 count++;
1768 });
1769
1770 return count;
1771 }
1772
1773 /* See nat/linux-nat.h. */
1774
1775 struct lwp_info *
1776 iterate_over_lwps (ptid_t filter,
1777 gdb::function_view<iterate_over_lwps_ftype> callback)
1778 {
1779 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1780 {
1781 lwp_info *lwp = get_thread_lwp (thr_arg);
1782
1783 return callback (lwp);
1784 });
1785
1786 if (thread == NULL)
1787 return NULL;
1788
1789 return get_thread_lwp (thread);
1790 }
1791
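/* Usage sketch for the iterator above: count the stopped LWPs of a
   hypothetical process 1234.  Returning 0 from the callback keeps
   the iteration going; a non-zero return would stop it and make
   iterate_over_lwps return that LWP.  */

static int
example_count_stopped_lwps ()
{
  int count = 0;

  iterate_over_lwps (ptid_t (1234), [&] (struct lwp_info *lwp)
    {
      if (lwp->stopped)
	count++;
      return 0;
    });

  return count;
}
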
1792 void
1793 linux_process_target::check_zombie_leaders ()
1794 {
1795 for_each_process ([this] (process_info *proc) {
1796 pid_t leader_pid = pid_of (proc);
1797 struct lwp_info *leader_lp;
1798
1799 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1800
1801 if (debug_threads)
1802 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1803 "num_lwps=%d, zombie=%d\n",
1804 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1805 linux_proc_pid_is_zombie (leader_pid));
1806
1807 if (leader_lp != NULL && !leader_lp->stopped
1808 /* Check if there are other threads in the group, as we may
1809 have raced with the inferior simply exiting. */
1810 && !last_thread_of_process_p (leader_pid)
1811 && linux_proc_pid_is_zombie (leader_pid))
1812 {
1813 /* A leader zombie can mean one of two things:
1814
1815 - It exited, and there's an exit status pending
1816 available, or only the leader exited (not the whole
1817 program). In the latter case, we can't waitpid the
1818 leader's exit status until all other threads are gone.
1819
1820 - There are 3 or more threads in the group, and a thread
1821 other than the leader exec'd. On an exec, the Linux
1822 kernel destroys all other threads (except the execing
1823 one) in the thread group, and resets the execing thread's
1824 tid to the tgid. No exit notification is sent for the
1825 execing thread -- from the ptracer's perspective, it
1826 appears as though the execing thread just vanishes.
1827 Until we reap all other threads except the leader and the
1828 execing thread, the leader will be zombie, and the
1829 execing thread will be in `D (disc sleep)'. As soon as
1830 all other threads are reaped, the execing thread changes
1831 its tid to the tgid, and the previous (zombie) leader
1832 vanishes, giving place to the "new" leader. We could try
1833 distinguishing the exit and exec cases, by waiting once
1834 more, and seeing if something comes out, but it doesn't
1835 sound useful. The previous leader _does_ go away, and
1836 we'll re-add the new one once we see the exec event
1837 (which is just the same as what would happen if the
1838 previous leader did exit voluntarily before some other
1839 thread execs). */
1840
1841 if (debug_threads)
1842 debug_printf ("CZL: Thread group leader %d zombie "
1843 "(it exited, or another thread execd).\n",
1844 leader_pid);
1845
1846 delete_lwp (leader_lp);
1847 }
1848 });
1849 }
1850
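/* A rough sketch of the zombie test used above, assuming the usual
   /proc layout; the real helper, linux_proc_pid_is_zombie (in
   nat/linux-procfs.c), reads the State: line of /proc/PID/status.  */

static int
example_pid_is_zombie (pid_t pid)
{
  char path[64], buf[256];
  int zombie = 0;

  xsnprintf (path, sizeof (path), "/proc/%d/status", (int) pid);

  FILE *f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (buf, sizeof (buf), f) != NULL)
    if (startswith (buf, "State:"))
      {
	zombie = strchr (buf, 'Z') != NULL;
	break;
      }
  fclose (f);
  return zombie;
}
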
1851 /* Callback for `find_thread'. Returns the first LWP that is not
1852 stopped. */
1853
1854 static bool
1855 not_stopped_callback (thread_info *thread, ptid_t filter)
1856 {
1857 if (!thread->id.matches (filter))
1858 return false;
1859
1860 lwp_info *lwp = get_thread_lwp (thread);
1861
1862 return !lwp->stopped;
1863 }
1864
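/* Usage sketch for the predicate above, mirroring how the wait code
   later in this file uses it: find any LWP matching FILTER that is
   still running.  */

static thread_info *
example_find_running (ptid_t filter)
{
  return find_thread ([&] (thread_info *thread)
    {
      return not_stopped_callback (thread, filter);
    });
}
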
1865 /* Increment LWP's suspend count. */
1866
1867 static void
1868 lwp_suspended_inc (struct lwp_info *lwp)
1869 {
1870 lwp->suspended++;
1871
1872 if (debug_threads && lwp->suspended > 4)
1873 {
1874 struct thread_info *thread = get_lwp_thread (lwp);
1875
1876 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1877 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1878 }
1879 }
1880
1881 /* Decrement LWP's suspend count. */
1882
1883 static void
1884 lwp_suspended_decr (struct lwp_info *lwp)
1885 {
1886 lwp->suspended--;
1887
1888 if (lwp->suspended < 0)
1889 {
1890 struct thread_info *thread = get_lwp_thread (lwp);
1891
1892 internal_error (__FILE__, __LINE__,
1893 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1894 lwp->suspended);
1895 }
1896 }
1897
1898 /* This function should only be called if the LWP got a SIGTRAP.
1899
1900 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1901 event was handled, 0 otherwise. */
1902
1903 static int
1904 handle_tracepoints (struct lwp_info *lwp)
1905 {
1906 struct thread_info *tinfo = get_lwp_thread (lwp);
1907 int tpoint_related_event = 0;
1908
1909 gdb_assert (lwp->suspended == 0);
1910
1911 /* If this tracepoint hit causes a tracing stop, we'll immediately
1912 uninsert tracepoints. To do this, we temporarily pause all
1913 threads, unpatch away, and then unpause threads. We need to make
1914 sure the unpausing doesn't resume LWP too. */
1915 lwp_suspended_inc (lwp);
1916
1917 /* And we need to be sure that any all-threads-stopping doesn't try
1918 to move threads out of the jump pads, as it could deadlock the
1919 inferior (LWP could be in the jump pad, maybe even holding the
1920 lock.) */
1921
1922 /* Do any necessary step collect actions. */
1923 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1924
1925 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1926
1927 /* See if we just hit a tracepoint and do its main collect
1928 actions. */
1929 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1930
1931 lwp_suspended_decr (lwp);
1932
1933 gdb_assert (lwp->suspended == 0);
1934 gdb_assert (!stabilizing_threads
1935 || (lwp->collecting_fast_tracepoint
1936 != fast_tpoint_collect_result::not_collecting));
1937
1938 if (tpoint_related_event)
1939 {
1940 if (debug_threads)
1941 debug_printf ("got a tracepoint event\n");
1942 return 1;
1943 }
1944
1945 return 0;
1946 }
1947
1948 fast_tpoint_collect_result
1949 linux_process_target::linux_fast_tracepoint_collecting
1950 (lwp_info *lwp, fast_tpoint_collect_status *status)
1951 {
1952 CORE_ADDR thread_area;
1953 struct thread_info *thread = get_lwp_thread (lwp);
1954
1955 /* Get the thread area address. This is used to recognize which
1956 thread is which when tracing with the in-process agent library.
1957 We don't read anything from the address, and treat it as opaque;
1958 it's the address itself that we assume is unique per-thread. */
1959 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1960 return fast_tpoint_collect_result::not_collecting;
1961
1962 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1963 }
1964
1965 int
1966 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1967 {
1968 return -1;
1969 }
1970
1971 bool
1972 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1973 {
1974 struct thread_info *saved_thread;
1975
1976 saved_thread = current_thread;
1977 current_thread = get_lwp_thread (lwp);
1978
1979 if ((wstat == NULL
1980 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1981 && supports_fast_tracepoints ()
1982 && agent_loaded_p ())
1983 {
1984 struct fast_tpoint_collect_status status;
1985
1986 if (debug_threads)
1987 debug_printf ("Checking whether LWP %ld needs to move out of the "
1988 "jump pad.\n",
1989 lwpid_of (current_thread));
1990
1991 fast_tpoint_collect_result r
1992 = linux_fast_tracepoint_collecting (lwp, &status);
1993
1994 if (wstat == NULL
1995 || (WSTOPSIG (*wstat) != SIGILL
1996 && WSTOPSIG (*wstat) != SIGFPE
1997 && WSTOPSIG (*wstat) != SIGSEGV
1998 && WSTOPSIG (*wstat) != SIGBUS))
1999 {
2000 lwp->collecting_fast_tracepoint = r;
2001
2002 if (r != fast_tpoint_collect_result::not_collecting)
2003 {
2004 if (r == fast_tpoint_collect_result::before_insn
2005 && lwp->exit_jump_pad_bkpt == NULL)
2006 {
2007 /* Haven't executed the original instruction yet.
2008 Set breakpoint there, and wait till it's hit,
2009 then single-step until exiting the jump pad. */
2010 lwp->exit_jump_pad_bkpt
2011 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2012 }
2013
2014 if (debug_threads)
2015 debug_printf ("Checking whether LWP %ld needs to move out of "
2016 "the jump pad...it does\n",
2017 lwpid_of (current_thread));
2018 current_thread = saved_thread;
2019
2020 return true;
2021 }
2022 }
2023 else
2024 {
2025 /* If we get a synchronous signal while collecting, *and*
2026 while executing the (relocated) original instruction,
2027 reset the PC to point at the tpoint address, before
2028 reporting to GDB. Otherwise, it's an IPA lib bug: just
2029 report the signal to GDB, and pray for the best. */
2030
2031 lwp->collecting_fast_tracepoint
2032 = fast_tpoint_collect_result::not_collecting;
2033
2034 if (r != fast_tpoint_collect_result::not_collecting
2035 && (status.adjusted_insn_addr <= lwp->stop_pc
2036 && lwp->stop_pc < status.adjusted_insn_addr_end))
2037 {
2038 siginfo_t info;
2039 struct regcache *regcache;
2040
2041 /* The si_addr on a few signals references the address
2042 of the faulting instruction. Adjust that as
2043 well. */
2044 if ((WSTOPSIG (*wstat) == SIGILL
2045 || WSTOPSIG (*wstat) == SIGFPE
2046 || WSTOPSIG (*wstat) == SIGBUS
2047 || WSTOPSIG (*wstat) == SIGSEGV)
2048 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2049 (PTRACE_TYPE_ARG3) 0, &info) == 0
2050 /* Final check just to make sure we don't clobber
2051 the siginfo of non-kernel-sent signals. */
2052 && (uintptr_t) info.si_addr == lwp->stop_pc)
2053 {
2054 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2055 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2056 (PTRACE_TYPE_ARG3) 0, &info);
2057 }
2058
2059 regcache = get_thread_regcache (current_thread, 1);
2060 low_set_pc (regcache, status.tpoint_addr);
2061 lwp->stop_pc = status.tpoint_addr;
2062
2063 /* Cancel any fast tracepoint lock this thread was
2064 holding. */
2065 force_unlock_trace_buffer ();
2066 }
2067
2068 if (lwp->exit_jump_pad_bkpt != NULL)
2069 {
2070 if (debug_threads)
2071 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2072 "stopping all threads momentarily.\n");
2073
2074 stop_all_lwps (1, lwp);
2075
2076 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2077 lwp->exit_jump_pad_bkpt = NULL;
2078
2079 unstop_all_lwps (1, lwp);
2080
2081 gdb_assert (lwp->suspended >= 0);
2082 }
2083 }
2084 }
2085
2086 if (debug_threads)
2087 debug_printf ("Checking whether LWP %ld needs to move out of the "
2088 "jump pad...no\n",
2089 lwpid_of (current_thread));
2090
2091 current_thread = saved_thread;
2092 return false;
2093 }
2094
2095 /* Enqueue one signal in the "signals to report later when out of the
2096 jump pad" list. */
2097
2098 static void
2099 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2100 {
2101 struct thread_info *thread = get_lwp_thread (lwp);
2102
2103 if (debug_threads)
2104 debug_printf ("Deferring signal %d for LWP %ld.\n",
2105 WSTOPSIG (*wstat), lwpid_of (thread));
2106
2107 if (debug_threads)
2108 {
2109 for (const auto &sig : lwp->pending_signals_to_report)
2110 debug_printf (" Already queued %d\n",
2111 sig.signal);
2112
2113 debug_printf (" (no more currently queued signals)\n");
2114 }
2115
2116 /* Don't enqueue non-RT signals if they are already in the deferred
2117 queue. (SIGSTOP being the easiest signal to see ending up here
2118 twice.) */
2119 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2120 {
2121 for (const auto &sig : lwp->pending_signals_to_report)
2122 {
2123 if (sig.signal == WSTOPSIG (*wstat))
2124 {
2125 if (debug_threads)
2126 debug_printf ("Not requeuing already queued non-RT signal %d"
2127 " for LWP %ld\n",
2128 sig.signal,
2129 lwpid_of (thread));
2130 return;
2131 }
2132 }
2133 }
2134
2135 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2136
2137 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2138 &lwp->pending_signals_to_report.back ().info);
2139 }
2140
2141 /* Dequeue one signal from the "signals to report later when out of
2142 the jump pad" list. */
2143
2144 static int
2145 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2146 {
2147 struct thread_info *thread = get_lwp_thread (lwp);
2148
2149 if (!lwp->pending_signals_to_report.empty ())
2150 {
2151 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2152
2153 *wstat = W_STOPCODE (p_sig.signal);
2154 if (p_sig.info.si_signo != 0)
2155 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2156 &p_sig.info);
2157
2158 lwp->pending_signals_to_report.pop_front ();
2159
2160 if (debug_threads)
2161 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2162 WSTOPSIG (*wstat), lwpid_of (thread));
2163
2164 if (debug_threads)
2165 {
2166 for (const auto &sig : lwp->pending_signals_to_report)
2167 debug_printf (" Still queued %d\n",
2168 sig.signal);
2169
2170 debug_printf (" (no more queued signals)\n");
2171 }
2172
2173 return 1;
2174 }
2175
2176 return 0;
2177 }
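
/* A note on W_STOPCODE as used above (illustrative): it synthesizes
   a wait status that looks exactly as if waitpid had just reported a
   stop by the given signal.  On Linux/glibc it expands to

     W_STOPCODE (sig) == ((sig) << 8 | 0x7f)

   so WIFSTOPPED () is true on the result and WSTOPSIG () recovers
   SIG, which is what lets the dequeued signal flow through the
   normal reporting path.  */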
2178
2179 bool
2180 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2181 {
2182 struct thread_info *saved_thread = current_thread;
2183 current_thread = get_lwp_thread (child);
2184
2185 if (low_stopped_by_watchpoint ())
2186 {
2187 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2188 child->stopped_data_address = low_stopped_data_address ();
2189 }
2190
2191 current_thread = saved_thread;
2192
2193 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2194 }
2195
2196 bool
2197 linux_process_target::low_stopped_by_watchpoint ()
2198 {
2199 return false;
2200 }
2201
2202 CORE_ADDR
2203 linux_process_target::low_stopped_data_address ()
2204 {
2205 return 0;
2206 }
2207
2208 /* Return the ptrace options that we want to try to enable. */
2209
2210 static int
2211 linux_low_ptrace_options (int attached)
2212 {
2213 client_state &cs = get_client_state ();
2214 int options = 0;
2215
2216 if (!attached)
2217 options |= PTRACE_O_EXITKILL;
2218
2219 if (cs.report_fork_events)
2220 options |= PTRACE_O_TRACEFORK;
2221
2222 if (cs.report_vfork_events)
2223 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2224
2225 if (cs.report_exec_events)
2226 options |= PTRACE_O_TRACEEXEC;
2227
2228 options |= PTRACE_O_TRACESYSGOOD;
2229
2230 return options;
2231 }
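
/* Worked example (illustrative; the actual value depends on the
   client state): for a process gdbserver launched itself (ATTACHED
   == 0), with the client asking for fork and exec events but not
   vfork events, the mask computed above is

     PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC
       | PTRACE_O_TRACESYSGOOD.  */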
2232
2233 void
2234 linux_process_target::filter_event (int lwpid, int wstat)
2235 {
2236 client_state &cs = get_client_state ();
2237 struct lwp_info *child;
2238 struct thread_info *thread;
2239 int have_stop_pc = 0;
2240
2241 child = find_lwp_pid (ptid_t (lwpid));
2242
2243 /* Check for stop events reported by a process we didn't already
2244 know about - anything not already in our LWP list.
2245
2246 If we're expecting to receive stopped processes after
2247 fork, vfork, and clone events, then we'll just add the
2248 new one to our list and go back to waiting for the event
2249 to be reported - the stopped process might be returned
2250 from waitpid before or after the event is.
2251
2252 But note the case of a non-leader thread exec'ing after the
2253 leader has exited and gone from our lists (because
2254 check_zombie_leaders deleted it). The non-leader thread
2255 changes its tid to the tgid. */
2256
2257 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2258 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2259 {
2260 ptid_t child_ptid;
2261
2262 /* A multi-thread exec after we had seen the leader exiting. */
2263 if (debug_threads)
2264 {
2265 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2266 "after exec.\n", lwpid);
2267 }
2268
2269 child_ptid = ptid_t (lwpid, lwpid);
2270 child = add_lwp (child_ptid);
2271 child->stopped = 1;
2272 current_thread = child->thread;
2273 }
2274
2275 /* If we didn't find a process, one of two things presumably happened:
2276 - A process we started and then detached from has exited. Ignore it.
2277 - A process we are controlling has forked and the new child's stop
2278 was reported to us by the kernel. Save its PID. */
2279 if (child == NULL && WIFSTOPPED (wstat))
2280 {
2281 add_to_pid_list (&stopped_pids, lwpid, wstat);
2282 return;
2283 }
2284 else if (child == NULL)
2285 return;
2286
2287 thread = get_lwp_thread (child);
2288
2289 child->stopped = 1;
2290
2291 child->last_status = wstat;
2292
2293 /* Check if the thread has exited. */
2294 if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
2295 {
2296 if (debug_threads)
2297 debug_printf ("LLFE: %d exited.\n", lwpid);
2298
2299 if (finish_step_over (child))
2300 {
2301 /* Unsuspend all other LWPs, and set them back running again. */
2302 unsuspend_all_lwps (child);
2303 }
2304
2305 /* If there is at least one more LWP, then the exit signal was
2306 not the end of the debugged application and should be
2307 ignored, unless GDB wants to hear about thread exits. */
2308 if (cs.report_thread_events
2309 || last_thread_of_process_p (pid_of (thread)))
2310 {
2311 /* Events are serialized to the GDB core, and we can't
2312 report this one right now, so leave the status pending for
2313 the next time we're able to report it. */
2314 mark_lwp_dead (child, wstat);
2315 return;
2316 }
2317 else
2318 {
2319 delete_lwp (child);
2320 return;
2321 }
2322 }
2323
2324 gdb_assert (WIFSTOPPED (wstat));
2325
2326 if (WIFSTOPPED (wstat))
2327 {
2328 struct process_info *proc;
2329
2330 /* Architecture-specific setup after inferior is running. */
2331 proc = find_process_pid (pid_of (thread));
2332 if (proc->tdesc == NULL)
2333 {
2334 if (proc->attached)
2335 {
2336 /* This needs to happen after we have attached to the
2337 inferior and it is stopped for the first time, but
2338 before we access any inferior registers. */
2339 arch_setup_thread (thread);
2340 }
2341 else
2342 {
2343 /* The process is started, but GDBserver will do
2344 architecture-specific setup after the program stops at
2345 the first instruction. */
2346 child->status_pending_p = 1;
2347 child->status_pending = wstat;
2348 return;
2349 }
2350 }
2351 }
2352
2353 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2354 {
2355 struct process_info *proc = find_process_pid (pid_of (thread));
2356 int options = linux_low_ptrace_options (proc->attached);
2357
2358 linux_enable_event_reporting (lwpid, options);
2359 child->must_set_ptrace_flags = 0;
2360 }
2361
2362 /* Always update syscall_state, even if it will be filtered later. */
2363 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2364 {
2365 child->syscall_state
2366 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2367 ? TARGET_WAITKIND_SYSCALL_RETURN
2368 : TARGET_WAITKIND_SYSCALL_ENTRY);
2369 }
2370 else
2371 {
2372 /* Almost all other ptrace-stops are known to be outside of system
2373 calls, with further exceptions in handle_extended_wait. */
2374 child->syscall_state = TARGET_WAITKIND_IGNORE;
2375 }
2376
2377 /* Be careful to not overwrite stop_pc until save_stop_reason is
2378 called. */
2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2380 && linux_is_extended_waitstatus (wstat))
2381 {
2382 child->stop_pc = get_pc (child);
2383 if (handle_extended_wait (&child, wstat))
2384 {
2385 /* The event has been handled, so just return without
2386 reporting it. */
2387 return;
2388 }
2389 }
2390
2391 if (linux_wstatus_maybe_breakpoint (wstat))
2392 {
2393 if (save_stop_reason (child))
2394 have_stop_pc = 1;
2395 }
2396
2397 if (!have_stop_pc)
2398 child->stop_pc = get_pc (child);
2399
2400 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2401 && child->stop_expected)
2402 {
2403 if (debug_threads)
2404 debug_printf ("Expected stop.\n");
2405 child->stop_expected = 0;
2406
2407 if (thread->last_resume_kind == resume_stop)
2408 {
2409 /* We want to report the stop to the core. Treat the
2410 SIGSTOP as a normal event. */
2411 if (debug_threads)
2412 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2413 target_pid_to_str (ptid_of (thread)).c_str ());
2414 }
2415 else if (stopping_threads != NOT_STOPPING_THREADS)
2416 {
2417 /* Stopping threads. We don't want this SIGSTOP to end up
2418 pending. */
2419 if (debug_threads)
2420 debug_printf ("LLW: SIGSTOP caught for %s "
2421 "while stopping threads.\n",
2422 target_pid_to_str (ptid_of (thread)).c_str ());
2423 return;
2424 }
2425 else
2426 {
2427 /* This is a delayed SIGSTOP. Filter out the event. */
2428 if (debug_threads)
2429 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2430 child->stepping ? "step" : "continue",
2431 target_pid_to_str (ptid_of (thread)).c_str ());
2432
2433 resume_one_lwp (child, child->stepping, 0, NULL);
2434 return;
2435 }
2436 }
2437
2438 child->status_pending_p = 1;
2439 child->status_pending = wstat;
2440 return;
2441 }
2442
2443 bool
2444 linux_process_target::maybe_hw_step (thread_info *thread)
2445 {
2446 if (supports_hardware_single_step ())
2447 return true;
2448 else
2449 {
2450 /* GDBserver must insert a single-step breakpoint for software
2451 single step. */
2452 gdb_assert (has_single_step_breakpoints (thread));
2453 return false;
2454 }
2455 }
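
/* In other words (sketch): when this returns true, the eventual
   resume of the LWP does roughly

     ptrace (PTRACE_SINGLESTEP, lwpid, 0, signal);

   while on software single-step targets the LWP is resumed with
   PTRACE_CONT and relies on the single-step breakpoints asserted
   above.  */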
2456
2457 void
2458 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2459 {
2460 struct lwp_info *lp = get_thread_lwp (thread);
2461
2462 if (lp->stopped
2463 && !lp->suspended
2464 && !lp->status_pending_p
2465 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2466 {
2467 int step = 0;
2468
2469 if (thread->last_resume_kind == resume_step)
2470 step = maybe_hw_step (thread);
2471
2472 if (debug_threads)
2473 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2474 target_pid_to_str (ptid_of (thread)).c_str (),
2475 paddress (lp->stop_pc),
2476 step);
2477
2478 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2479 }
2480 }
2481
2482 int
2483 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2484 ptid_t filter_ptid,
2485 int *wstatp, int options)
2486 {
2487 struct thread_info *event_thread;
2488 struct lwp_info *event_child, *requested_child;
2489 sigset_t block_mask, prev_mask;
2490
2491 retry:
2492 /* N.B. event_thread points to the thread_info struct that contains
2493 event_child. Keep them in sync. */
2494 event_thread = NULL;
2495 event_child = NULL;
2496 requested_child = NULL;
2497
2498 /* Check for a lwp with a pending status. */
2499
2500 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2501 {
2502 event_thread = find_thread_in_random ([&] (thread_info *thread)
2503 {
2504 return status_pending_p_callback (thread, filter_ptid);
2505 });
2506
2507 if (event_thread != NULL)
2508 event_child = get_thread_lwp (event_thread);
2509 if (debug_threads && event_thread)
2510 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2511 }
2512 else if (filter_ptid != null_ptid)
2513 {
2514 requested_child = find_lwp_pid (filter_ptid);
2515
2516 if (stopping_threads == NOT_STOPPING_THREADS
2517 && requested_child->status_pending_p
2518 && (requested_child->collecting_fast_tracepoint
2519 != fast_tpoint_collect_result::not_collecting))
2520 {
2521 enqueue_one_deferred_signal (requested_child,
2522 &requested_child->status_pending);
2523 requested_child->status_pending_p = 0;
2524 requested_child->status_pending = 0;
2525 resume_one_lwp (requested_child, 0, 0, NULL);
2526 }
2527
2528 if (requested_child->suspended
2529 && requested_child->status_pending_p)
2530 {
2531 internal_error (__FILE__, __LINE__,
2532 "requesting an event out of a"
2533 " suspended child?");
2534 }
2535
2536 if (requested_child->status_pending_p)
2537 {
2538 event_child = requested_child;
2539 event_thread = get_lwp_thread (event_child);
2540 }
2541 }
2542
2543 if (event_child != NULL)
2544 {
2545 if (debug_threads)
2546 debug_printf ("Got an event from pending child %ld (%04x)\n",
2547 lwpid_of (event_thread), event_child->status_pending);
2548 *wstatp = event_child->status_pending;
2549 event_child->status_pending_p = 0;
2550 event_child->status_pending = 0;
2551 current_thread = event_thread;
2552 return lwpid_of (event_thread);
2553 }
2554
2555 /* But if we don't find a pending event, we'll have to wait.
2556
2557 We only enter this loop if no process has a pending wait status.
2558 Thus any action taken in response to a wait status inside this
2559 loop is responding as soon as we detect the status, not after any
2560 pending events. */
2561
2562 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2563 all signals while here. */
2564 sigfillset (&block_mask);
2565 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2566
2567 /* Always pull all events out of the kernel. We'll randomly select
2568 an event LWP out of all that have events, to prevent
2569 starvation. */
2570 while (event_child == NULL)
2571 {
2572 pid_t ret = 0;
2573
2574 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2575 quirks:
2576
2577 - If the thread group leader exits while other threads in the
2578 thread group still exist, waitpid(TGID, ...) hangs. That
2579 waitpid won't return an exit status until the other threads
2580 in the group are reaped.
2581
2582 - When a non-leader thread execs, that thread just vanishes
2583 without reporting an exit (so we'd hang if we waited for it
2584 explicitly in that case). The exec event is reported to
2585 the TGID pid. */
2586 errno = 0;
2587 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2588
2589 if (debug_threads)
2590 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2591 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2592
2593 if (ret > 0)
2594 {
2595 if (debug_threads)
2596 {
2597 debug_printf ("LLW: waitpid %ld received %s\n",
2598 (long) ret, status_to_str (*wstatp).c_str ());
2599 }
2600
2601 /* Filter all events. IOW, leave all events pending. We'll
2602 randomly select an event LWP out of all that have events
2603 below. */
2604 filter_event (ret, *wstatp);
2605 /* Retry until nothing comes out of waitpid. A single
2606 SIGCHLD can indicate more than one child stopped. */
2607 continue;
2608 }
2609
2610 /* Now that we've pulled all events out of the kernel, resume
2611 LWPs that don't have an interesting event to report. */
2612 if (stopping_threads == NOT_STOPPING_THREADS)
2613 for_each_thread ([this] (thread_info *thread)
2614 {
2615 resume_stopped_resumed_lwps (thread);
2616 });
2617
2618 /* ... and find an LWP with a status to report to the core, if
2619 any. */
2620 event_thread = find_thread_in_random ([&] (thread_info *thread)
2621 {
2622 return status_pending_p_callback (thread, filter_ptid);
2623 });
2624
2625 if (event_thread != NULL)
2626 {
2627 event_child = get_thread_lwp (event_thread);
2628 *wstatp = event_child->status_pending;
2629 event_child->status_pending_p = 0;
2630 event_child->status_pending = 0;
2631 break;
2632 }
2633
2634 /* Check for zombie thread group leaders. Those can't be reaped
2635 until all other threads in the thread group are. */
2636 check_zombie_leaders ();
2637
2638 auto not_stopped = [&] (thread_info *thread)
2639 {
2640 return not_stopped_callback (thread, wait_ptid);
2641 };
2642
2643 /* If there are no resumed children left in the set of LWPs we
2644 want to wait for, bail. We can't just block in
2645 waitpid/sigsuspend, because lwps might have been left stopped
2646 in trace-stop state, and we'd be stuck forever waiting for
2647 their status to change (which would only happen if we resumed
2648 them). Even if WNOHANG is set, this return code is preferred
2649 over 0 (below), as it is more detailed. */
2650 if (find_thread (not_stopped) == NULL)
2651 {
2652 if (debug_threads)
2653 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2654 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2655 return -1;
2656 }
2657
2658 /* No interesting event to report to the caller. */
2659 if ((options & WNOHANG))
2660 {
2661 if (debug_threads)
2662 debug_printf ("WNOHANG set, no event found\n");
2663
2664 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2665 return 0;
2666 }
2667
2668 /* Block until we get an event reported with SIGCHLD. */
2669 if (debug_threads)
2670 debug_printf ("sigsuspend'ing\n");
2671
2672 sigsuspend (&prev_mask);
2673 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2674 goto retry;
2675 }
2676
2677 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2678
2679 current_thread = event_thread;
2680
2681 return lwpid_of (event_thread);
2682 }
2683
2684 int
2685 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2686 {
2687 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2688 }
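
/* The heart of wait_for_event_filtered above is a classic two-phase
   pattern: drain every status waitpid has with WNOHANG, then
   sigsuspend until SIGCHLD.  Reduced to a skeleton (illustrative
   only; record_event and have_pending_event stand in for the
   filtering and random selection done above):

     for (;;)
       {
         int status;
         pid_t ret;

         while ((ret = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
           record_event (ret, status);

         if (have_pending_event ())
           break;

         sigsuspend (&prev_mask);
       }
*/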
2689
2690 /* Select one LWP out of those that have events pending. */
2691
2692 static void
2693 select_event_lwp (struct lwp_info **orig_lp)
2694 {
2695 struct thread_info *event_thread = NULL;
2696
2697 /* In all-stop, give preference to the LWP that is being
2698 single-stepped. There will be at most one, and it's the LWP that
2699 the core is most interested in. If we didn't do this, then we'd
2700 have to handle pending step SIGTRAPs somehow in case the core
2701 later continues the previously-stepped thread; otherwise we'd
2702 report the pending SIGTRAP, and the core, not having stepped the
2703 thread, wouldn't understand what the trap was for, and therefore
2704 would report it to the user as a random signal. */
2705 if (!non_stop)
2706 {
2707 event_thread = find_thread ([] (thread_info *thread)
2708 {
2709 lwp_info *lp = get_thread_lwp (thread);
2710
2711 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2712 && thread->last_resume_kind == resume_step
2713 && lp->status_pending_p);
2714 });
2715
2716 if (event_thread != NULL)
2717 {
2718 if (debug_threads)
2719 debug_printf ("SEL: Select single-step %s\n",
2720 target_pid_to_str (ptid_of (event_thread)).c_str ());
2721 }
2722 }
2723 if (event_thread == NULL)
2724 {
2725 /* No single-stepping LWP. Select one at random, out of those
2726 which have had events. */
2727
2728 event_thread = find_thread_in_random ([&] (thread_info *thread)
2729 {
2730 lwp_info *lp = get_thread_lwp (thread);
2731
2732 /* Only resumed LWPs that have an event pending. */
2733 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2734 && lp->status_pending_p);
2735 });
2736 }
2737
2738 if (event_thread != NULL)
2739 {
2740 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2741
2742 /* Switch the event LWP. */
2743 *orig_lp = event_lp;
2744 }
2745 }
2746
2747 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2748 non-NULL. */
2749
2750 static void
2751 unsuspend_all_lwps (struct lwp_info *except)
2752 {
2753 for_each_thread ([&] (thread_info *thread)
2754 {
2755 lwp_info *lwp = get_thread_lwp (thread);
2756
2757 if (lwp != except)
2758 lwp_suspended_decr (lwp);
2759 });
2760 }
2761
2762 static bool lwp_running (thread_info *thread);
2763
2764 /* Stabilize threads (move out of jump pads).
2765
2766 If a thread is midway collecting a fast tracepoint, we need to
2767 finish the collection and move it out of the jump pad before
2768 reporting the signal.
2769
2770 This avoids recursion while collecting (when a signal arrives
2771 midway, and the signal handler itself collects), which would trash
2772 the trace buffer. In case the user set a breakpoint in a signal
2773 handler, this avoids the backtrace showing the jump pad, etc..
2774 Most importantly, there are certain things we can't do safely if
2775 threads are stopped in a jump pad (or in its callee's). For
2776 example:
2777
2778 - starting a new trace run. A thread still collecting the
2779 previous run could trash the trace buffer when resumed. The trace
2780 buffer control structures would have been reset but the thread had
2781 no way to tell. The thread could even be midway through memcpy'ing
2782 to the buffer, which would mean that when resumed, it would clobber
2783 the trace buffer that had been set up for the new run.
2784
2785 - we can't rewrite/reuse the jump pads for new tracepoints
2786 safely. Say you do tstart while a thread is stopped midway while
2787 collecting. When the thread is later resumed, it finishes the
2788 collection, and returns to the jump pad, to execute the original
2789 instruction that was under the tracepoint jump at the time the
2790 older run had been started. If the jump pad had since been
2791 rewritten for something else in the new run, the thread would now
2792 execute the wrong / random instructions. */
2793
2794 void
2795 linux_process_target::stabilize_threads ()
2796 {
2797 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2798 {
2799 return stuck_in_jump_pad (thread);
2800 });
2801
2802 if (thread_stuck != NULL)
2803 {
2804 if (debug_threads)
2805 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2806 lwpid_of (thread_stuck));
2807 return;
2808 }
2809
2810 thread_info *saved_thread = current_thread;
2811
2812 stabilizing_threads = 1;
2813
2814 /* Kick 'em all. */
2815 for_each_thread ([this] (thread_info *thread)
2816 {
2817 move_out_of_jump_pad (thread);
2818 });
2819
2820 /* Loop until all are stopped out of the jump pads. */
2821 while (find_thread (lwp_running) != NULL)
2822 {
2823 struct target_waitstatus ourstatus;
2824 struct lwp_info *lwp;
2825 int wstat;
2826
2827 /* Note that we go through the full wait event loop. While
2828 moving threads out of the jump pad, we need to be able to step
2829 over internal breakpoints and such. */
2830 wait_1 (minus_one_ptid, &ourstatus, 0);
2831
2832 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2833 {
2834 lwp = get_thread_lwp (current_thread);
2835
2836 /* Lock it. */
2837 lwp_suspended_inc (lwp);
2838
2839 if (ourstatus.sig () != GDB_SIGNAL_0
2840 || current_thread->last_resume_kind == resume_stop)
2841 {
2842 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2843 enqueue_one_deferred_signal (lwp, &wstat);
2844 }
2845 }
2846 }
2847
2848 unsuspend_all_lwps (NULL);
2849
2850 stabilizing_threads = 0;
2851
2852 current_thread = saved_thread;
2853
2854 if (debug_threads)
2855 {
2856 thread_stuck = find_thread ([this] (thread_info *thread)
2857 {
2858 return stuck_in_jump_pad (thread);
2859 });
2860
2861 if (thread_stuck != NULL)
2862 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2863 lwpid_of (thread_stuck));
2864 }
2865 }
2866
2867 /* Convenience function that is called when the kernel reports an
2868 event that is not passed out to GDB. */
2869
2870 static ptid_t
2871 ignore_event (struct target_waitstatus *ourstatus)
2872 {
2873 /* If we got an event, there may still be others, as a single
2874 SIGCHLD can indicate more than one child stopped. This forces
2875 another target_wait call. */
2876 async_file_mark ();
2877
2878 ourstatus->set_ignore ();
2879 return null_ptid;
2880 }
2881
2882 ptid_t
2883 linux_process_target::filter_exit_event (lwp_info *event_child,
2884 target_waitstatus *ourstatus)
2885 {
2886 client_state &cs = get_client_state ();
2887 struct thread_info *thread = get_lwp_thread (event_child);
2888 ptid_t ptid = ptid_of (thread);
2889
2890 if (!last_thread_of_process_p (pid_of (thread)))
2891 {
2892 if (cs.report_thread_events)
2893 ourstatus->set_thread_exited (0);
2894 else
2895 ourstatus->set_ignore ();
2896
2897 delete_lwp (event_child);
2898 }
2899 return ptid;
2900 }
2901
2902 /* Returns 1 if GDB is interested in any event_child syscalls. */
2903
2904 static int
2905 gdb_catching_syscalls_p (struct lwp_info *event_child)
2906 {
2907 struct thread_info *thread = get_lwp_thread (event_child);
2908 struct process_info *proc = get_thread_process (thread);
2909
2910 return !proc->syscalls_to_catch.empty ();
2911 }
2912
2913 bool
2914 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2915 {
2916 int sysno;
2917 struct thread_info *thread = get_lwp_thread (event_child);
2918 struct process_info *proc = get_thread_process (thread);
2919
2920 if (proc->syscalls_to_catch.empty ())
2921 return false;
2922
2923 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2924 return true;
2925
2926 get_syscall_trapinfo (event_child, &sysno);
2927
2928 for (int iter : proc->syscalls_to_catch)
2929 if (iter == sysno)
2930 return true;
2931
2932 return false;
2933 }
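
/* Example (illustrative): after a bare "catch syscall" in GDB,
   proc->syscalls_to_catch is { ANY_SYSCALL }, so every syscall stop
   is reported.  After "catch syscall write" it instead holds the
   numeric syscall number (e.g. { 1 } on x86_64), and only stops
   whose trapped syscall number matches are reported.  */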
2934
2935 ptid_t
2936 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2937 target_wait_flags target_options)
2938 {
2939 client_state &cs = get_client_state ();
2940 int w;
2941 struct lwp_info *event_child;
2942 int options;
2943 int pid;
2944 int step_over_finished;
2945 int bp_explains_trap;
2946 int maybe_internal_trap;
2947 int report_to_gdb;
2948 int trace_event;
2949 int in_step_range;
2950 int any_resumed;
2951
2952 if (debug_threads)
2953 {
2954 debug_enter ();
2955 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid).c_str ());
2956 }
2957
2958 /* Translate generic target options into linux options. */
2959 options = __WALL;
2960 if (target_options & TARGET_WNOHANG)
2961 options |= WNOHANG;
2962
2963 bp_explains_trap = 0;
2964 trace_event = 0;
2965 in_step_range = 0;
2966 ourstatus->set_ignore ();
2967
2968 auto status_pending_p_any = [&] (thread_info *thread)
2969 {
2970 return status_pending_p_callback (thread, minus_one_ptid);
2971 };
2972
2973 auto not_stopped = [&] (thread_info *thread)
2974 {
2975 return not_stopped_callback (thread, minus_one_ptid);
2976 };
2977
2978 /* Find a resumed LWP, if any. */
2979 if (find_thread (status_pending_p_any) != NULL)
2980 any_resumed = 1;
2981 else if (find_thread (not_stopped) != NULL)
2982 any_resumed = 1;
2983 else
2984 any_resumed = 0;
2985
2986 if (step_over_bkpt == null_ptid)
2987 pid = wait_for_event (ptid, &w, options);
2988 else
2989 {
2990 if (debug_threads)
2991 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2992 target_pid_to_str (step_over_bkpt).c_str ());
2993 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2994 }
2995
2996 if (pid == 0 || (pid == -1 && !any_resumed))
2997 {
2998 gdb_assert (target_options & TARGET_WNOHANG);
2999
3000 if (debug_threads)
3001 {
3002 debug_printf ("wait_1 ret = null_ptid, "
3003 "TARGET_WAITKIND_IGNORE\n");
3004 debug_exit ();
3005 }
3006
3007 ourstatus->set_ignore ();
3008 return null_ptid;
3009 }
3010 else if (pid == -1)
3011 {
3012 if (debug_threads)
3013 {
3014 debug_printf ("wait_1 ret = null_ptid, "
3015 "TARGET_WAITKIND_NO_RESUMED\n");
3016 debug_exit ();
3017 }
3018
3019 ourstatus->set_no_resumed ();
3020 return null_ptid;
3021 }
3022
3023 event_child = get_thread_lwp (current_thread);
3024
3025 /* wait_for_event only returns an exit status for the last
3026 child of a process. Report it. */
3027 if (WIFEXITED (w) || WIFSIGNALED (w))
3028 {
3029 if (WIFEXITED (w))
3030 {
3031 ourstatus->set_exited (WEXITSTATUS (w));
3032
3033 if (debug_threads)
3034 {
3035 debug_printf ("wait_1 ret = %s, exited with "
3036 "retcode %d\n",
3037 target_pid_to_str (ptid_of (current_thread)).c_str (),
3038 WEXITSTATUS (w));
3039 debug_exit ();
3040 }
3041 }
3042 else
3043 {
3044 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3045
3046 if (debug_threads)
3047 {
3048 debug_printf ("wait_1 ret = %s, terminated with "
3049 "signal %d\n",
3050 target_pid_to_str (ptid_of (current_thread)).c_str (),
3051 WTERMSIG (w));
3052 debug_exit ();
3053 }
3054 }
3055
3056 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3057 return filter_exit_event (event_child, ourstatus);
3058
3059 return ptid_of (current_thread);
3060 }
3061
3062 /* If step-over executes a breakpoint instruction, in the case of a
3063 hardware single step it means a gdb/gdbserver breakpoint had been
3064 planted on top of a permanent breakpoint, in the case of a software
3065 single step it may just mean that gdbserver hit the reinsert breakpoint.
3066 The PC has been adjusted by save_stop_reason to point at
3067 the breakpoint address.
3068 So in the case of hardware single step, advance the PC manually
3069 past the breakpoint; in the case of software single step, advance
3070 only if it's not the single_step_breakpoint we are hitting.
3071 This prevents the program from trapping on a permanent breakpoint
3072 forever. */
3073 if (step_over_bkpt != null_ptid
3074 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3075 && (event_child->stepping
3076 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3077 {
3078 int increment_pc = 0;
3079 int breakpoint_kind = 0;
3080 CORE_ADDR stop_pc = event_child->stop_pc;
3081
3082 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3083 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3084
3085 if (debug_threads)
3086 {
3087 debug_printf ("step-over for %s executed software breakpoint\n",
3088 target_pid_to_str (ptid_of (current_thread)).c_str ());
3089 }
3090
3091 if (increment_pc != 0)
3092 {
3093 struct regcache *regcache
3094 = get_thread_regcache (current_thread, 1);
3095
3096 event_child->stop_pc += increment_pc;
3097 low_set_pc (regcache, event_child->stop_pc);
3098
3099 if (!low_breakpoint_at (event_child->stop_pc))
3100 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3101 }
3102 }
3103
3104 /* If this event was not handled before, and is not a SIGTRAP, we
3105 report it. SIGILL and SIGSEGV are also treated as traps in case
3106 a breakpoint is inserted at the current PC. If this target does
3107 not support internal breakpoints at all, we also report the
3108 SIGTRAP without further processing; it's of no concern to us. */
3109 maybe_internal_trap
3110 = (low_supports_breakpoints ()
3111 && (WSTOPSIG (w) == SIGTRAP
3112 || ((WSTOPSIG (w) == SIGILL
3113 || WSTOPSIG (w) == SIGSEGV)
3114 && low_breakpoint_at (event_child->stop_pc))));
3115
3116 if (maybe_internal_trap)
3117 {
3118 /* Handle anything that requires bookkeeping before deciding to
3119 report the event or continue waiting. */
3120
3121 /* First check if we can explain the SIGTRAP with an internal
3122 breakpoint, or if we should possibly report the event to GDB.
3123 Do this before anything that may remove or insert a
3124 breakpoint. */
3125 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3126
3127 /* We have a SIGTRAP, possibly a step-over dance has just
3128 finished. If so, tweak the state machine accordingly,
3129 reinsert breakpoints and delete any single-step
3130 breakpoints. */
3131 step_over_finished = finish_step_over (event_child);
3132
3133 /* Now invoke the callbacks of any internal breakpoints there. */
3134 check_breakpoints (event_child->stop_pc);
3135
3136 /* Handle tracepoint data collecting. This may overflow the
3137 trace buffer, and cause a tracing stop, removing
3138 breakpoints. */
3139 trace_event = handle_tracepoints (event_child);
3140
3141 if (bp_explains_trap)
3142 {
3143 if (debug_threads)
3144 debug_printf ("Hit a gdbserver breakpoint.\n");
3145 }
3146 }
3147 else
3148 {
3149 /* We have some other signal, possibly a step-over dance was in
3150 progress, and it should be cancelled too. */
3151 step_over_finished = finish_step_over (event_child);
3152 }
3153
3154 /* We have all the data we need. Either report the event to GDB, or
3155 resume threads and keep waiting for more. */
3156
3157 /* If we're collecting a fast tracepoint, finish the collection and
3158 move out of the jump pad before delivering a signal. See
3159 linux_stabilize_threads. */
3160
3161 if (WIFSTOPPED (w)
3162 && WSTOPSIG (w) != SIGTRAP
3163 && supports_fast_tracepoints ()
3164 && agent_loaded_p ())
3165 {
3166 if (debug_threads)
3167 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3168 "to defer or adjust it.\n",
3169 WSTOPSIG (w), lwpid_of (current_thread));
3170
3171 /* Allow debugging the jump pad itself. */
3172 if (current_thread->last_resume_kind != resume_step
3173 && maybe_move_out_of_jump_pad (event_child, &w))
3174 {
3175 enqueue_one_deferred_signal (event_child, &w);
3176
3177 if (debug_threads)
3178 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3179 WSTOPSIG (w), lwpid_of (current_thread));
3180
3181 resume_one_lwp (event_child, 0, 0, NULL);
3182
3183 if (debug_threads)
3184 debug_exit ();
3185 return ignore_event (ourstatus);
3186 }
3187 }
3188
3189 if (event_child->collecting_fast_tracepoint
3190 != fast_tpoint_collect_result::not_collecting)
3191 {
3192 if (debug_threads)
3193 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3194 "Check if we're already there.\n",
3195 lwpid_of (current_thread),
3196 (int) event_child->collecting_fast_tracepoint);
3197
3198 trace_event = 1;
3199
3200 event_child->collecting_fast_tracepoint
3201 = linux_fast_tracepoint_collecting (event_child, NULL);
3202
3203 if (event_child->collecting_fast_tracepoint
3204 != fast_tpoint_collect_result::before_insn)
3205 {
3206 /* No longer need this breakpoint. */
3207 if (event_child->exit_jump_pad_bkpt != NULL)
3208 {
3209 if (debug_threads)
3210 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3211 "stopping all threads momentarily.\n");
3212
3213 /* Other running threads could hit this breakpoint.
3214 We don't handle moribund locations like GDB does;
3215 instead we always pause all threads when removing
3216 breakpoints, so that any step-over or
3217 decr_pc_after_break adjustment is always taken
3218 care of while the breakpoint is still
3219 inserted. */
3220 stop_all_lwps (1, event_child);
3221
3222 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3223 event_child->exit_jump_pad_bkpt = NULL;
3224
3225 unstop_all_lwps (1, event_child);
3226
3227 gdb_assert (event_child->suspended >= 0);
3228 }
3229 }
3230
3231 if (event_child->collecting_fast_tracepoint
3232 == fast_tpoint_collect_result::not_collecting)
3233 {
3234 if (debug_threads)
3235 debug_printf ("fast tracepoint finished "
3236 "collecting successfully.\n");
3237
3238 /* We may have a deferred signal to report. */
3239 if (dequeue_one_deferred_signal (event_child, &w))
3240 {
3241 if (debug_threads)
3242 debug_printf ("dequeued one signal.\n");
3243 }
3244 else
3245 {
3246 if (debug_threads)
3247 debug_printf ("no deferred signals.\n");
3248
3249 if (stabilizing_threads)
3250 {
3251 ourstatus->set_stopped (GDB_SIGNAL_0);
3252
3253 if (debug_threads)
3254 {
3255 debug_printf ("wait_1 ret = %s, stopped "
3256 "while stabilizing threads\n",
3257 target_pid_to_str
3258 (ptid_of (current_thread)).c_str ());
3259 debug_exit ();
3260 }
3261
3262 return ptid_of (current_thread);
3263 }
3264 }
3265 }
3266 }
3267
3268 /* Check whether GDB would be interested in this event. */
3269
3270 /* Check if GDB is interested in this syscall. */
3271 if (WIFSTOPPED (w)
3272 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3273 && !gdb_catch_this_syscall (event_child))
3274 {
3275 if (debug_threads)
3276 {
3277 debug_printf ("Ignored syscall for LWP %ld.\n",
3278 lwpid_of (current_thread));
3279 }
3280
3281 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3282
3283 if (debug_threads)
3284 debug_exit ();
3285 return ignore_event (ourstatus);
3286 }
3287
3288 /* If GDB is not interested in this signal, don't stop other
3289 threads, and don't report it to GDB. Just resume the inferior
3290 right away. We do this for threading-related signals as well as
3291 any that GDB specifically requested we ignore. But never ignore
3292 SIGSTOP if we sent it ourselves, and do not ignore signals when
3293 stepping - they may require special handling to skip the signal
3294 handler. Also never ignore signals that could be caused by a
3295 breakpoint. */
3296 if (WIFSTOPPED (w)
3297 && current_thread->last_resume_kind != resume_step
3298 && (
3299 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3300 (current_process ()->priv->thread_db != NULL
3301 && (WSTOPSIG (w) == __SIGRTMIN
3302 || WSTOPSIG (w) == __SIGRTMIN + 1))
3303 ||
3304 #endif
3305 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3306 && !(WSTOPSIG (w) == SIGSTOP
3307 && current_thread->last_resume_kind == resume_stop)
3308 && !linux_wstatus_maybe_breakpoint (w))))
3309 {
3310 siginfo_t info, *info_p;
3311
3312 if (debug_threads)
3313 debug_printf ("Ignored signal %d for LWP %ld.\n",
3314 WSTOPSIG (w), lwpid_of (current_thread));
3315
3316 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3317 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3318 info_p = &info;
3319 else
3320 info_p = NULL;
3321
3322 if (step_over_finished)
3323 {
3324 /* We cancelled this thread's step-over above. We still
3325 need to unsuspend all other LWPs, and set them back
3326 running again while the signal handler runs. */
3327 unsuspend_all_lwps (event_child);
3328
3329 /* Enqueue the pending signal info so that proceed_all_lwps
3330 doesn't lose it. */
3331 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3332
3333 proceed_all_lwps ();
3334 }
3335 else
3336 {
3337 resume_one_lwp (event_child, event_child->stepping,
3338 WSTOPSIG (w), info_p);
3339 }
3340
3341 if (debug_threads)
3342 debug_exit ();
3343
3344 return ignore_event (ourstatus);
3345 }
3346
3347 /* Note that all addresses are always "out of the step range" when
3348 there's no range to begin with. */
3349 in_step_range = lwp_in_step_range (event_child);
3350
3351 /* If GDB wanted this thread to single step, and the thread is out
3352 of the step range, we always want to report the SIGTRAP, and let
3353 GDB handle it. Watchpoints should always be reported. So should
3354 signals we can't explain. A SIGTRAP we can't explain could be a
3355 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3356 do, we'll be able to handle GDB breakpoints on top of internal
3357 breakpoints, by handling the internal breakpoint and still
3358 reporting the event to GDB. If we don't, we're out of luck; GDB
3359 won't see the breakpoint hit. If we see a single-step event but
3360 the thread should be continuing, don't pass the trap to gdb.
3361 That indicates that we had previously finished a single-step but
3362 left the single-step pending -- see
3363 complete_ongoing_step_over. */
3364 report_to_gdb = (!maybe_internal_trap
3365 || (current_thread->last_resume_kind == resume_step
3366 && !in_step_range)
3367 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3368 || (!in_step_range
3369 && !bp_explains_trap
3370 && !trace_event
3371 && !step_over_finished
3372 && !(current_thread->last_resume_kind == resume_continue
3373 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3374 || (gdb_breakpoint_here (event_child->stop_pc)
3375 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3376 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3377 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3378
3379 run_breakpoint_commands (event_child->stop_pc);
3380
3381 /* We found no reason GDB would want us to stop. We either hit one
3382 of our own breakpoints, or finished an internal step GDB
3383 shouldn't know about. */
3384 if (!report_to_gdb)
3385 {
3386 if (debug_threads)
3387 {
3388 if (bp_explains_trap)
3389 debug_printf ("Hit a gdbserver breakpoint.\n");
3390 if (step_over_finished)
3391 debug_printf ("Step-over finished.\n");
3392 if (trace_event)
3393 debug_printf ("Tracepoint event.\n");
3394 if (lwp_in_step_range (event_child))
3395 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3396 paddress (event_child->stop_pc),
3397 paddress (event_child->step_range_start),
3398 paddress (event_child->step_range_end));
3399 }
3400
3401 /* We're not reporting this breakpoint to GDB, so apply the
3402 decr_pc_after_break adjustment to the inferior's regcache
3403 ourselves. */
3404
3405 if (low_supports_breakpoints ())
3406 {
3407 struct regcache *regcache
3408 = get_thread_regcache (current_thread, 1);
3409 low_set_pc (regcache, event_child->stop_pc);
3410 }
3411
3412 if (step_over_finished)
3413 {
3414 /* If we have finished stepping over a breakpoint, we've
3415 stopped and suspended all LWPs momentarily except the
3416 stepping one. This is where we resume them all again.
3417 We're going to keep waiting, so use proceed, which
3418 handles stepping over the next breakpoint. */
3419 unsuspend_all_lwps (event_child);
3420 }
3421 else
3422 {
3423 /* Remove the single-step breakpoints if any. Note that
3424 there isn't a single-step breakpoint if we finished stepping
3425 over. */
3426 if (supports_software_single_step ()
3427 && has_single_step_breakpoints (current_thread))
3428 {
3429 stop_all_lwps (0, event_child);
3430 delete_single_step_breakpoints (current_thread);
3431 unstop_all_lwps (0, event_child);
3432 }
3433 }
3434
3435 if (debug_threads)
3436 debug_printf ("proceeding all threads.\n");
3437 proceed_all_lwps ();
3438
3439 if (debug_threads)
3440 debug_exit ();
3441
3442 return ignore_event (ourstatus);
3443 }
3444
3445 if (debug_threads)
3446 {
3447 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3448 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3449 lwpid_of (get_lwp_thread (event_child)),
3450 event_child->waitstatus.to_string ().c_str ());
3451 if (current_thread->last_resume_kind == resume_step)
3452 {
3453 if (event_child->step_range_start == event_child->step_range_end)
3454 debug_printf ("GDB wanted to single-step, reporting event.\n");
3455 else if (!lwp_in_step_range (event_child))
3456 debug_printf ("Out of step range, reporting event.\n");
3457 }
3458 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3459 debug_printf ("Stopped by watchpoint.\n");
3460 else if (gdb_breakpoint_here (event_child->stop_pc))
3461 debug_printf ("Stopped by GDB breakpoint.\n");
3462 debug_printf ("Hit a non-gdbserver trap event.\n");
3464 }
3465
3466 /* Alright, we're going to report a stop. */
3467
3468 /* Remove single-step breakpoints. */
3469 if (supports_software_single_step ())
3470 {
3471 /* Decide whether to remove single-step breakpoints. If so, stop
3472 all lwps, so that other threads won't hit the breakpoint in the
3473 stale memory. */
3474 int remove_single_step_breakpoints_p = 0;
3475
3476 if (non_stop)
3477 {
3478 remove_single_step_breakpoints_p
3479 = has_single_step_breakpoints (current_thread);
3480 }
3481 else
3482 {
3483 /* In all-stop, a stop reply cancels all previous resume
3484 requests. Delete all single-step breakpoints. */
3485
3486 find_thread ([&] (thread_info *thread) {
3487 if (has_single_step_breakpoints (thread))
3488 {
3489 remove_single_step_breakpoints_p = 1;
3490 return true;
3491 }
3492
3493 return false;
3494 });
3495 }
3496
3497 if (remove_single_step_breakpoints_p)
3498 {
3499 /* If we remove single-step breakpoints from memory, stop all lwps,
3500 so that other threads won't hit the breakpoint in the stale
3501 memory. */
3502 stop_all_lwps (0, event_child);
3503
3504 if (non_stop)
3505 {
3506 gdb_assert (has_single_step_breakpoints (current_thread));
3507 delete_single_step_breakpoints (current_thread);
3508 }
3509 else
3510 {
3511 for_each_thread ([] (thread_info *thread){
3512 if (has_single_step_breakpoints (thread))
3513 delete_single_step_breakpoints (thread);
3514 });
3515 }
3516
3517 unstop_all_lwps (0, event_child);
3518 }
3519 }
3520
3521 if (!stabilizing_threads)
3522 {
3523 /* In all-stop, stop all threads. */
3524 if (!non_stop)
3525 stop_all_lwps (0, NULL);
3526
3527 if (step_over_finished)
3528 {
3529 if (!non_stop)
3530 {
3531 /* If we were doing a step-over, all other threads but
3532 the stepping one had been paused in start_step_over,
3533 with their suspend counts incremented. We don't want
3534 to do a full unstop/unpause, because we're in
3535 all-stop mode (so we want threads stopped), but we
3536 still need to unsuspend the other threads, to
3537 decrement their `suspended' count back. */
3538 unsuspend_all_lwps (event_child);
3539 }
3540 else
3541 {
3542 /* If we just finished a step-over, then all threads had
3543 been momentarily paused. In all-stop, that's fine,
3544 we want threads stopped by now anyway. In non-stop,
3545 we need to re-resume threads that GDB wanted to be
3546 running. */
3547 unstop_all_lwps (1, event_child);
3548 }
3549 }
3550
3551 /* If we're not waiting for a specific LWP, choose an event LWP
3552 from among those that have had events. Giving equal priority
3553 to all LWPs that have had events helps prevent
3554 starvation. */
3555 if (ptid == minus_one_ptid)
3556 {
3557 event_child->status_pending_p = 1;
3558 event_child->status_pending = w;
3559
3560 select_event_lwp (&event_child);
3561
3562 /* current_thread and event_child must stay in sync. */
3563 current_thread = get_lwp_thread (event_child);
3564
3565 event_child->status_pending_p = 0;
3566 w = event_child->status_pending;
3567 }
3568
3569
3570 /* Stabilize threads (move out of jump pads). */
3571 if (!non_stop)
3572 target_stabilize_threads ();
3573 }
3574 else
3575 {
3576 /* If we just finished a step-over, then all threads had been
3577 momentarily paused. In all-stop, that's fine, we want
3578 threads stopped by now anyway. In non-stop, we need to
3579 re-resume threads that GDB wanted to be running. */
3580 if (step_over_finished)
3581 unstop_all_lwps (1, event_child);
3582 }
3583
3584 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3585 {
3586 /* If the reported event is an exit, fork, vfork or exec, let
3587 GDB know. */
3588
3589 /* Break the unreported fork relationship chain. */
3590 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3591 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3592 {
3593 event_child->fork_relative->fork_relative = NULL;
3594 event_child->fork_relative = NULL;
3595 }
3596
3597 *ourstatus = event_child->waitstatus;
3598 /* Clear the event lwp's waitstatus since we handled it already. */
3599 event_child->waitstatus.set_ignore ();
3600 }
3601 else
3602 {
3603 /* The actual stop signal is overwritten below. */
3604 ourstatus->set_stopped (GDB_SIGNAL_0);
3605 }
3606
3607 /* Now that we've selected our final event LWP, un-adjust its PC if
3608 it was a software breakpoint, and the client doesn't know we can
3609 adjust the breakpoint ourselves. */
3610 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3611 && !cs.swbreak_feature)
3612 {
3613 int decr_pc = low_decr_pc_after_break ();
3614
3615 if (decr_pc != 0)
3616 {
3617 struct regcache *regcache
3618 = get_thread_regcache (current_thread, 1);
3619 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3620 }
3621 }
3622
3623 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3624 {
3625 int syscall_number;
3626
3627 get_syscall_trapinfo (event_child, &syscall_number);
3628 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3629 ourstatus->set_syscall_entry (syscall_number);
3630 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3631 ourstatus->set_syscall_return (syscall_number);
3632 else
3633 gdb_assert_not_reached ("unexpected syscall state");
3634 }
3635 else if (current_thread->last_resume_kind == resume_stop
3636 && WSTOPSIG (w) == SIGSTOP)
3637 {
3638 /* A thread that has been requested to stop by GDB with vCont;t
3639 stopped cleanly, so report it as SIG0. The use of
3640 SIGSTOP is an implementation detail. */
3641 ourstatus->set_stopped (GDB_SIGNAL_0);
3642 }
3643 else if (current_thread->last_resume_kind == resume_stop
3644 && WSTOPSIG (w) != SIGSTOP)
3645 {
3646 /* A thread that has been requested to stop by GDB with vCont;t,
3647 but it stopped for other reasons. */
3648 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3649 }
3650 else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
3651 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3652
3653 gdb_assert (step_over_bkpt == null_ptid);
3654
3655 if (debug_threads)
3656 {
3657 debug_printf ("wait_1 ret = %s, %d, %d\n",
3658 target_pid_to_str (ptid_of (current_thread)).c_str (),
3659 ourstatus->kind (), ourstatus->sig ());
3660 debug_exit ();
3661 }
3662
3663 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3664 return filter_exit_event (event_child, ourstatus);
3665
3666 return ptid_of (current_thread);
3667 }
3668
3669 /* Get rid of any pending event in the pipe. */
3670 static void
3671 async_file_flush (void)
3672 {
3673 int ret;
3674 char buf;
3675
3676 do
3677 ret = read (linux_event_pipe[0], &buf, 1);
3678 while (ret >= 0 || (ret == -1 && errno == EINTR));
3679 }
3680
3681 /* Put something in the pipe, so the event loop wakes up. */
3682 static void
3683 async_file_mark (void)
3684 {
3685 int ret;
3686
3687 async_file_flush ();
3688
3689 do
3690 ret = write (linux_event_pipe[1], "+", 1);
3691 while (ret == 0 || (ret == -1 && errno == EINTR));
3692
3693 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3694 be awakened anyway. */
3695 }
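
/* The two helpers above are the usual self-pipe trick: marking the
   pipe wakes the event loop, which drains it before waiting again.
   For the EAGAIN-based retry logic above to work, both ends must be
   non-blocking; a setup sketch (assuming linux_event_pipe is the
   int[2] used above):

     if (pipe (linux_event_pipe) != 0)
       error ("pipe failed");
     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
*/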
3696
3697 ptid_t
3698 linux_process_target::wait (ptid_t ptid,
3699 target_waitstatus *ourstatus,
3700 target_wait_flags target_options)
3701 {
3702 ptid_t event_ptid;
3703
3704 /* Flush the async file first. */
3705 if (target_is_async_p ())
3706 async_file_flush ();
3707
3708 do
3709 {
3710 event_ptid = wait_1 (ptid, ourstatus, target_options);
3711 }
3712 while ((target_options & TARGET_WNOHANG) == 0
3713 && event_ptid == null_ptid
3714 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3715
3716 /* If at least one stop was reported, there may be more. A single
3717 SIGCHLD can signal more than one child stop. */
3718 if (target_is_async_p ()
3719 && (target_options & TARGET_WNOHANG) != 0
3720 && event_ptid != null_ptid)
3721 async_file_mark ();
3722
3723 return event_ptid;
3724 }
3725
3726 /* Send a signal to an LWP. */
3727
3728 static int
3729 kill_lwp (unsigned long lwpid, int signo)
3730 {
3731 int ret;
3732
3733 errno = 0;
3734 ret = syscall (__NR_tkill, lwpid, signo);
3735 if (errno == ENOSYS)
3736 {
3737 /* If tkill fails, then we are not using nptl threads, a
3738 configuration we no longer support. */
3739 perror_with_name (("tkill"));
3740 }
3741 return ret;
3742 }
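/* Note: unlike kill(2), which targets a whole thread group and lets
   the kernel pick the receiving thread, tkill(2) directs the signal
   at exactly one LWP.  Roughly (illustrative; tkill typically has no
   glibc wrapper, hence the raw syscall):

     syscall (__NR_tkill, lwpid, SIGSTOP);   // stop this one LWP
     kill (pid, SIGSTOP);		      // vs.: whole thread group

   tgkill(2) would additionally protect against LWP-id reuse, at the
   cost of also needing the thread-group id.  */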
3743
3744 void
3745 linux_stop_lwp (struct lwp_info *lwp)
3746 {
3747 send_sigstop (lwp);
3748 }
3749
3750 static void
3751 send_sigstop (struct lwp_info *lwp)
3752 {
3753 int pid;
3754
3755 pid = lwpid_of (get_lwp_thread (lwp));
3756
3757 /* If we already have a pending stop signal for this LWP, don't
3758 send another. */
3759 if (lwp->stop_expected)
3760 {
3761 if (debug_threads)
3762 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3763
3764 return;
3765 }
3766
3767 if (debug_threads)
3768 debug_printf ("Sending sigstop to lwp %d\n", pid);
3769
3770 lwp->stop_expected = 1;
3771 kill_lwp (pid, SIGSTOP);
3772 }
3773
3774 static void
3775 send_sigstop (thread_info *thread, lwp_info *except)
3776 {
3777 struct lwp_info *lwp = get_thread_lwp (thread);
3778
3779 /* Ignore EXCEPT. */
3780 if (lwp == except)
3781 return;
3782
3783 if (lwp->stopped)
3784 return;
3785
3786 send_sigstop (lwp);
3787 }
3788
3789 /* Increment the suspend count of an LWP, and stop it if it is not
3790 already stopped. */
3791 static void
3792 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3793 {
3794 struct lwp_info *lwp = get_thread_lwp (thread);
3795
3796 /* Ignore EXCEPT. */
3797 if (lwp == except)
3798 return;
3799
3800 lwp_suspended_inc (lwp);
3801
3802 send_sigstop (thread, except);
3803 }
3804
3805 static void
3806 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3807 {
3808 /* Store the exit status for later. */
3809 lwp->status_pending_p = 1;
3810 lwp->status_pending = wstat;
3811
3812 /* Store in waitstatus as well, as there's nothing else to process
3813 for this event. */
3814 if (WIFEXITED (wstat))
3815 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3816 else if (WIFSIGNALED (wstat))
3817 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3818
3819 /* Prevent trying to stop it. */
3820 lwp->stopped = 1;
3821
3822 /* No further stops are expected from a dead lwp. */
3823 lwp->stop_expected = 0;
3824 }
3825
3826 /* Return true if LWP has exited already, and has a pending exit event
3827 to report to GDB. */
3828
3829 static int
3830 lwp_is_marked_dead (struct lwp_info *lwp)
3831 {
3832 return (lwp->status_pending_p
3833 && (WIFEXITED (lwp->status_pending)
3834 || WIFSIGNALED (lwp->status_pending)));
3835 }
3836
3837 void
3838 linux_process_target::wait_for_sigstop ()
3839 {
3840 struct thread_info *saved_thread;
3841 ptid_t saved_tid;
3842 int wstat;
3843 int ret;
3844
3845 saved_thread = current_thread;
3846 if (saved_thread != NULL)
3847 saved_tid = saved_thread->id;
3848 else
3849 saved_tid = null_ptid; /* avoid bogus unused warning */
3850
3851 if (debug_threads)
3852 debug_printf ("wait_for_sigstop: pulling events\n");
3853
3854 /* Passing NULL_PTID as filter indicates we want all events to be
3855 left pending. Eventually this returns when there are no
3856 unwaited-for children left. */
3857 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3858 gdb_assert (ret == -1);
3859
3860 if (saved_thread == NULL || mythread_alive (saved_tid))
3861 current_thread = saved_thread;
3862 else
3863 {
3864 if (debug_threads)
3865 debug_printf ("Previously current thread died.\n");
3866
3867 /* We can't change the current inferior behind GDB's back,
3868 otherwise, a subsequent command may apply to the wrong
3869 process. */
3870 current_thread = NULL;
3871 }
3872 }
3873
3874 bool
3875 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3876 {
3877 struct lwp_info *lwp = get_thread_lwp (thread);
3878
3879 if (lwp->suspended != 0)
3880 {
3881 internal_error (__FILE__, __LINE__,
3882 "LWP %ld is suspended, suspended=%d\n",
3883 lwpid_of (thread), lwp->suspended);
3884 }
3885 gdb_assert (lwp->stopped);
3886
3887 /* Allow debugging the jump pad, gdb_collect, etc. */
3888 return (supports_fast_tracepoints ()
3889 && agent_loaded_p ()
3890 && (gdb_breakpoint_here (lwp->stop_pc)
3891 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3892 || thread->last_resume_kind == resume_step)
3893 && (linux_fast_tracepoint_collecting (lwp, NULL)
3894 != fast_tpoint_collect_result::not_collecting));
3895 }
3896
3897 void
3898 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3899 {
3900 struct thread_info *saved_thread;
3901 struct lwp_info *lwp = get_thread_lwp (thread);
3902 int *wstat;
3903
3904 if (lwp->suspended != 0)
3905 {
3906 internal_error (__FILE__, __LINE__,
3907 "LWP %ld is suspended, suspended=%d\n",
3908 lwpid_of (thread), lwp->suspended);
3909 }
3910 gdb_assert (lwp->stopped);
3911
3912 /* For gdb_breakpoint_here. */
3913 saved_thread = current_thread;
3914 current_thread = thread;
3915
3916 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3917
3918 /* Allow debugging the jump pad, gdb_collect, etc. */
3919 if (!gdb_breakpoint_here (lwp->stop_pc)
3920 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3921 && thread->last_resume_kind != resume_step
3922 && maybe_move_out_of_jump_pad (lwp, wstat))
3923 {
3924 if (debug_threads)
3925 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3926 lwpid_of (thread));
3927
3928 if (wstat)
3929 {
3930 lwp->status_pending_p = 0;
3931 enqueue_one_deferred_signal (lwp, wstat);
3932
3933 if (debug_threads)
3934 debug_printf ("Signal %d for LWP %ld deferred "
3935 "(in jump pad)\n",
3936 WSTOPSIG (*wstat), lwpid_of (thread));
3937 }
3938
3939 resume_one_lwp (lwp, 0, 0, NULL);
3940 }
3941 else
3942 lwp_suspended_inc (lwp);
3943
3944 current_thread = saved_thread;
3945 }
3946
3947 static bool
3948 lwp_running (thread_info *thread)
3949 {
3950 struct lwp_info *lwp = get_thread_lwp (thread);
3951
3952 if (lwp_is_marked_dead (lwp))
3953 return false;
3954
3955 return !lwp->stopped;
3956 }
3957
3958 void
3959 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3960 {
3961 /* Should not be called recursively. */
3962 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3963
3964 if (debug_threads)
3965 {
3966 debug_enter ();
3967 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3968 suspend ? "stop-and-suspend" : "stop",
3969 (except != NULL
3970 ? target_pid_to_str
3971 (ptid_of (get_lwp_thread (except))).c_str ()
3972 : "none"));
3973 }
3974
3975 stopping_threads = (suspend
3976 ? STOPPING_AND_SUSPENDING_THREADS
3977 : STOPPING_THREADS);
3978
3979 if (suspend)
3980 for_each_thread ([&] (thread_info *thread)
3981 {
3982 suspend_and_send_sigstop (thread, except);
3983 });
3984 else
3985 for_each_thread ([&] (thread_info *thread)
3986 {
3987 send_sigstop (thread, except);
3988 });
3989
3990 wait_for_sigstop ();
3991 stopping_threads = NOT_STOPPING_THREADS;
3992
3993 if (debug_threads)
3994 {
3995 debug_printf ("stop_all_lwps done, setting stopping_threads "
3996 "back to !stopping\n");
3997 debug_exit ();
3998 }
3999 }
4000
4001 /* Enqueue one signal in the chain of signals which need to be
4002 delivered to this process on next resume. */
4003
4004 static void
4005 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4006 {
4007 lwp->pending_signals.emplace_back (signal);
4008 if (info == nullptr)
4009 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
4010 else
4011 lwp->pending_signals.back ().info = *info;
4012 }
4013
4014 void
4015 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4016 {
4017 struct thread_info *thread = get_lwp_thread (lwp);
4018 struct regcache *regcache = get_thread_regcache (thread, 1);
4019
4020 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4021
4022 current_thread = thread;
4023 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4024
4025 for (CORE_ADDR pc : next_pcs)
4026 set_single_step_breakpoint (pc, current_ptid);
4027 }
4028
4029 int
4030 linux_process_target::single_step (lwp_info* lwp)
4031 {
4032 int step = 0;
4033
4034 if (supports_hardware_single_step ())
4035 {
4036 step = 1;
4037 }
4038 else if (supports_software_single_step ())
4039 {
4040 install_software_single_step_breakpoints (lwp);
4041 step = 0;
4042 }
4043 else
4044 {
4045 if (debug_threads)
4046 debug_printf ("stepping is not implemented on this target\n");
4047 }
4048
4049 return step;
4050 }
4051
4052 /* The signal can be delivered to the inferior if we are not trying to
4053 finish a fast tracepoint collect. Since a signal can be delivered
4054 during the step-over, the program may go to the signal handler and
4055 trap again after returning from the signal handler. We can live
4056 with such spurious double traps. */
4057
4058 static int
4059 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4060 {
4061 return (lwp->collecting_fast_tracepoint
4062 == fast_tpoint_collect_result::not_collecting);
4063 }
4064
4065 void
4066 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4067 int signal, siginfo_t *info)
4068 {
4069 struct thread_info *thread = get_lwp_thread (lwp);
4070 struct thread_info *saved_thread;
4071 int ptrace_request;
4072 struct process_info *proc = get_thread_process (thread);
4073
4074 /* Note that the target description may not be initialised
4075 (proc->tdesc == NULL) at this point, because the program hasn't
4076 stopped at its first instruction yet. This happens while GDBserver
4077 is skipping the extra traps from the wrapper program (see option
4078 --wrapper). Code in this function that requires register access
4079 should therefore be guarded by a check on proc->tdesc. */
4080
4081 if (lwp->stopped == 0)
4082 return;
4083
4084 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
4085
4086 fast_tpoint_collect_result fast_tp_collecting
4087 = lwp->collecting_fast_tracepoint;
4088
4089 gdb_assert (!stabilizing_threads
4090 || (fast_tp_collecting
4091 != fast_tpoint_collect_result::not_collecting));
4092
4093 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4094 user used the "jump" command, or "set $pc = foo"). */
4095 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4096 {
4097 /* Collecting 'while-stepping' actions doesn't make sense
4098 anymore. */
4099 release_while_stepping_state_list (thread);
4100 }
4101
4102 /* If we have pending signals or status, and a new signal, enqueue the
4103 signal. Also enqueue the signal if it can't be delivered to the
4104 inferior right now. */
4105 if (signal != 0
4106 && (lwp->status_pending_p
4107 || !lwp->pending_signals.empty ()
4108 || !lwp_signal_can_be_delivered (lwp)))
4109 {
4110 enqueue_pending_signal (lwp, signal, info);
4111
4112 /* Postpone any pending signal. It was enqueued above. */
4113 signal = 0;
4114 }
4115
4116 if (lwp->status_pending_p)
4117 {
4118 if (debug_threads)
4119 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4120 " has pending status\n",
4121 lwpid_of (thread), step ? "step" : "continue",
4122 lwp->stop_expected ? "expected" : "not expected");
4123 return;
4124 }
4125
4126 saved_thread = current_thread;
4127 current_thread = thread;
4128
4129 /* This bit needs some thinking about. If we get a signal that
4130 we must report while a single-step reinsert is still pending,
4131 we often end up resuming the thread. It might be better to
4132 (ew) allow a stack of pending events; then we could be sure that
4133 the reinsert happened right away and not lose any signals.
4134
4135 Making this stack would also shrink the window in which breakpoints are
4136 uninserted (see comment in linux_wait_for_lwp) but not enough for
4137 complete correctness, so it won't solve that problem. It may be
4138 worthwhile just to solve this one, however. */
4139 if (lwp->bp_reinsert != 0)
4140 {
4141 if (debug_threads)
4142 debug_printf (" pending reinsert at 0x%s\n",
4143 paddress (lwp->bp_reinsert));
4144
4145 if (supports_hardware_single_step ())
4146 {
4147 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4148 {
4149 if (step == 0)
4150 warning ("BAD - reinserting but not stepping.");
4151 if (lwp->suspended)
4152 warning ("BAD - reinserting and suspended(%d).",
4153 lwp->suspended);
4154 }
4155 }
4156
4157 step = maybe_hw_step (thread);
4158 }
4159
4160 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4161 {
4162 if (debug_threads)
4163 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4164 " (exit-jump-pad-bkpt)\n",
4165 lwpid_of (thread));
4166 }
4167 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4168 {
4169 if (debug_threads)
4170 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4171 " single-stepping\n",
4172 lwpid_of (thread));
4173
4174 if (supports_hardware_single_step ())
4175 step = 1;
4176 else
4177 {
4178 internal_error (__FILE__, __LINE__,
4179 "moving out of jump pad single-stepping"
4180 " not implemented on this target");
4181 }
4182 }
4183
4184 /* If we have while-stepping actions in this thread, set it stepping.
4185 If we have a signal to deliver, it may or may not be set to
4186 SIG_IGN; we don't know. Assume so, and allow collecting
4187 while-stepping into a signal handler. A possible smart thing to
4188 do would be to set an internal breakpoint at the signal return
4189 address, continue, and carry on catching this while-stepping
4190 action only when that breakpoint is hit. A future
4191 enhancement. */
4192 if (thread->while_stepping != NULL)
4193 {
4194 if (debug_threads)
4195 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4196 lwpid_of (thread));
4197
4198 step = single_step (lwp);
4199 }
4200
4201 if (proc->tdesc != NULL && low_supports_breakpoints ())
4202 {
4203 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4204
4205 lwp->stop_pc = low_get_pc (regcache);
4206
4207 if (debug_threads)
4208 {
4209 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4210 (long) lwp->stop_pc);
4211 }
4212 }
4213
4214 /* If we have pending signals, consume one if it can be delivered to
4215 the inferior. */
4216 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4217 {
4218 const pending_signal &p_sig = lwp->pending_signals.front ();
4219
4220 signal = p_sig.signal;
4221 if (p_sig.info.si_signo != 0)
4222 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4223 &p_sig.info);
4224
4225 lwp->pending_signals.pop_front ();
4226 }
4227
4228 if (debug_threads)
4229 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4230 lwpid_of (thread), step ? "step" : "continue", signal,
4231 lwp->stop_expected ? "expected" : "not expected");
4232
4233 low_prepare_to_resume (lwp);
4234
4235 regcache_invalidate_thread (thread);
4236 errno = 0;
4237 lwp->stepping = step;
4238 if (step)
4239 ptrace_request = PTRACE_SINGLESTEP;
4240 else if (gdb_catching_syscalls_p (lwp))
4241 ptrace_request = PTRACE_SYSCALL;
4242 else
4243 ptrace_request = PTRACE_CONT;
4244 ptrace (ptrace_request,
4245 lwpid_of (thread),
4246 (PTRACE_TYPE_ARG3) 0,
4247 /* Coerce to a uintptr_t first to avoid potential gcc warning
4248 of coercing an 8 byte integer to a 4 byte pointer. */
4249 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4250
4251 current_thread = saved_thread;
4252 if (errno)
4253 perror_with_name ("resuming thread");
4254
4255 /* Successfully resumed. Clear state that no longer makes sense,
4256 and mark the LWP as running. Must not do this before resuming
4257 otherwise if that fails other code will be confused. E.g., we'd
4258 later try to stop the LWP and hang forever waiting for a stop
4259 status. Note that we must not throw after this is cleared,
4260 otherwise handle_zombie_lwp_error would get confused. */
4261 lwp->stopped = 0;
4262 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4263 }
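/* In ptrace terms, the resume above reduces to one of the following
   requests (sketch; the real call sites use the PTRACE_TYPE_* casts
   seen above):

     ptrace (PTRACE_SINGLESTEP, lwpid, 0, sig);  // hardware single-step
     ptrace (PTRACE_SYSCALL, lwpid, 0, sig);	 // stop at syscall entry/exit
     ptrace (PTRACE_CONT, lwpid, 0, sig);	 // plain continue

   where a non-zero SIG is injected into the tracee as it resumes.  */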
4264
4265 void
4266 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4267 {
4268 /* Nop. */
4269 }
4270
4271 /* Called when we try to resume a stopped LWP and that errors out. If
4272 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4273 or about to become one), discard the error, clear any pending status
4274 the LWP may have, and return true (we'll collect the exit status
4275 soon enough). Otherwise, return false. */
4276
4277 static int
4278 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4279 {
4280 struct thread_info *thread = get_lwp_thread (lp);
4281
4282 /* If we get an error after resuming the LWP successfully, we'd
4283 confuse !T state for the LWP being gone. */
4284 gdb_assert (lp->stopped);
4285
4286 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4287 because even if ptrace failed with ESRCH, the tracee may be "not
4288 yet fully dead", but already refusing ptrace requests. In that
4289 case the tracee has 'R (Running)' state for a little bit
4290 (observed in Linux 3.18). See also the note on ESRCH in the
4291 ptrace(2) man page. Instead, check whether the LWP has any state
4292 other than ptrace-stopped. */
4293
4294 /* Don't assume anything if /proc/PID/status can't be read. */
4295 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4296 {
4297 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4298 lp->status_pending_p = 0;
4299 return 1;
4300 }
4301 return 0;
4302 }
4303
4304 void
4305 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4306 siginfo_t *info)
4307 {
4308 try
4309 {
4310 resume_one_lwp_throw (lwp, step, signal, info);
4311 }
4312 catch (const gdb_exception_error &ex)
4313 {
4314 if (!check_ptrace_stopped_lwp_gone (lwp))
4315 throw;
4316 }
4317 }
4318
4319 /* This function is called once per thread via for_each_thread.
4320 We look up which resume request applies to THREAD and mark it with a
4321 pointer to the appropriate resume request.
4322
4323 This algorithm is O(threads * resume elements), but resume elements
4324 is small (and will remain small at least until GDB supports thread
4325 suspension). */
4326
4327 static void
4328 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4329 {
4330 struct lwp_info *lwp = get_thread_lwp (thread);
4331
4332 for (int ndx = 0; ndx < n; ndx++)
4333 {
4334 ptid_t ptid = resume[ndx].thread;
4335 if (ptid == minus_one_ptid
4336 || ptid == thread->id
4337 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4338 of PID'. */
4339 || (ptid.pid () == pid_of (thread)
4340 && (ptid.is_pid ()
4341 || ptid.lwp () == -1)))
4342 {
4343 if (resume[ndx].kind == resume_stop
4344 && thread->last_resume_kind == resume_stop)
4345 {
4346 if (debug_threads)
4347 debug_printf ("already %s LWP %ld at GDB's request\n",
4348 (thread->last_status.kind ()
4349 == TARGET_WAITKIND_STOPPED)
4350 ? "stopped"
4351 : "stopping",
4352 lwpid_of (thread));
4353
4354 continue;
4355 }
4356
4357 /* Ignore (wildcard) resume requests for already-resumed
4358 threads. */
4359 if (resume[ndx].kind != resume_stop
4360 && thread->last_resume_kind != resume_stop)
4361 {
4362 if (debug_threads)
4363 debug_printf ("already %s LWP %ld at GDB's request\n",
4364 (thread->last_resume_kind
4365 == resume_step)
4366 ? "stepping"
4367 : "continuing",
4368 lwpid_of (thread));
4369 continue;
4370 }
4371
4372 /* Don't let wildcard resumes resume fork children that GDB
4373 does not yet know are new fork children. */
4374 if (lwp->fork_relative != NULL)
4375 {
4376 struct lwp_info *rel = lwp->fork_relative;
4377
4378 if (rel->status_pending_p
4379 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4380 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4381 {
4382 if (debug_threads)
4383 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4384 lwpid_of (thread));
4385 continue;
4386 }
4387 }
4388
4389 /* If the thread has a pending event that has already been
4390 reported to GDBserver core, but GDB has not pulled the
4391 event out of the vStopped queue yet, likewise, ignore the
4392 (wildcard) resume request. */
4393 if (in_queued_stop_replies (thread->id))
4394 {
4395 if (debug_threads)
4396 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4397 lwpid_of (thread));
4398 continue;
4399 }
4400
4401 lwp->resume = &resume[ndx];
4402 thread->last_resume_kind = lwp->resume->kind;
4403
4404 lwp->step_range_start = lwp->resume->step_range_start;
4405 lwp->step_range_end = lwp->resume->step_range_end;
4406
4407 /* If we had a deferred signal to report, dequeue one now.
4408 This can happen if LWP gets more than one signal while
4409 trying to get out of a jump pad. */
4410 if (lwp->stopped
4411 && !lwp->status_pending_p
4412 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4413 {
4414 lwp->status_pending_p = 1;
4415
4416 if (debug_threads)
4417 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4418 "leaving status pending.\n",
4419 WSTOPSIG (lwp->status_pending),
4420 lwpid_of (thread));
4421 }
4422
4423 return;
4424 }
4425 }
4426
4427 /* No resume action for this thread. */
4428 lwp->resume = NULL;
4429 }
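/* The resume array above comes from GDB's vCont packet.  For example
   (illustrative), "vCont;s:p5.5;c" asks to single-step thread 5 of
   process 5 and continue everything else; the continue action
   arrives here as a wildcard minus_one_ptid entry, which the
   matching loop above applies to every remaining thread.  */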
4430
4431 bool
4432 linux_process_target::resume_status_pending (thread_info *thread)
4433 {
4434 struct lwp_info *lwp = get_thread_lwp (thread);
4435
4436 /* LWPs which will not be resumed are not interesting, because
4437 we might not wait for them next time through linux_wait. */
4438 if (lwp->resume == NULL)
4439 return false;
4440
4441 return thread_still_has_status_pending (thread);
4442 }
4443
4444 bool
4445 linux_process_target::thread_needs_step_over (thread_info *thread)
4446 {
4447 struct lwp_info *lwp = get_thread_lwp (thread);
4448 struct thread_info *saved_thread;
4449 CORE_ADDR pc;
4450 struct process_info *proc = get_thread_process (thread);
4451
4452 /* GDBserver is skipping the extra traps from the wrapper program,
4453 don't have to do step over. */
4454 if (proc->tdesc == NULL)
4455 return false;
4456
4457 /* LWPs which will not be resumed are not interesting, because we
4458 might not wait for them next time through linux_wait. */
4459
4460 if (!lwp->stopped)
4461 {
4462 if (debug_threads)
4463 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4464 lwpid_of (thread));
4465 return false;
4466 }
4467
4468 if (thread->last_resume_kind == resume_stop)
4469 {
4470 if (debug_threads)
4471 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4472 " stopped\n",
4473 lwpid_of (thread));
4474 return false;
4475 }
4476
4477 gdb_assert (lwp->suspended >= 0);
4478
4479 if (lwp->suspended)
4480 {
4481 if (debug_threads)
4482 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4483 lwpid_of (thread));
4484 return false;
4485 }
4486
4487 if (lwp->status_pending_p)
4488 {
4489 if (debug_threads)
4490 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4491 " status.\n",
4492 lwpid_of (thread));
4493 return false;
4494 }
4495
4496 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4497 or we have. */
4498 pc = get_pc (lwp);
4499
4500 /* If the PC has changed since we stopped, then don't do anything,
4501 and let the breakpoint/tracepoint be hit. This happens if, for
4502 instance, GDB handled the decr_pc_after_break subtraction itself,
4503 GDB is OOL stepping this thread, or the user has issued a "jump"
4504 command or poked the thread's registers directly. */
4505 if (pc != lwp->stop_pc)
4506 {
4507 if (debug_threads)
4508 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4509 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4510 lwpid_of (thread),
4511 paddress (lwp->stop_pc), paddress (pc));
4512 return false;
4513 }
4514
4515 /* On software single step target, resume the inferior with signal
4516 rather than stepping over. */
4517 if (supports_software_single_step ()
4518 && !lwp->pending_signals.empty ()
4519 && lwp_signal_can_be_delivered (lwp))
4520 {
4521 if (debug_threads)
4522 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4523 " signals.\n",
4524 lwpid_of (thread));
4525
4526 return false;
4527 }
4528
4529 saved_thread = current_thread;
4530 current_thread = thread;
4531
4532 /* We can only step over breakpoints we know about. */
4533 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4534 {
4535 /* Don't step over a breakpoint that GDB expects to hit,
4536 though. If the condition is being evaluated on the target's side
4537 and it evaluates to false, step over this breakpoint as well. */
4538 if (gdb_breakpoint_here (pc)
4539 && gdb_condition_true_at_breakpoint (pc)
4540 && gdb_no_commands_at_breakpoint (pc))
4541 {
4542 if (debug_threads)
4543 debug_printf ("Need step over [LWP %ld]? yes, but found"
4544 " GDB breakpoint at 0x%s; skipping step over\n",
4545 lwpid_of (thread), paddress (pc));
4546
4547 current_thread = saved_thread;
4548 return false;
4549 }
4550 else
4551 {
4552 if (debug_threads)
4553 debug_printf ("Need step over [LWP %ld]? yes, "
4554 "found breakpoint at 0x%s\n",
4555 lwpid_of (thread), paddress (pc));
4556
4557 /* We've found an LWP that needs stepping over --- return true so
4558 that find_thread stops looking. */
4559 current_thread = saved_thread;
4560
4561 return true;
4562 }
4563 }
4564
4565 current_thread = saved_thread;
4566
4567 if (debug_threads)
4568 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4569 " at 0x%s\n",
4570 lwpid_of (thread), paddress (pc));
4571
4572 return false;
4573 }
4574
4575 void
4576 linux_process_target::start_step_over (lwp_info *lwp)
4577 {
4578 struct thread_info *thread = get_lwp_thread (lwp);
4579 struct thread_info *saved_thread;
4580 CORE_ADDR pc;
4581 int step;
4582
4583 if (debug_threads)
4584 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4585 lwpid_of (thread));
4586
4587 stop_all_lwps (1, lwp);
4588
4589 if (lwp->suspended != 0)
4590 {
4591 internal_error (__FILE__, __LINE__,
4592 "LWP %ld suspended=%d\n", lwpid_of (thread),
4593 lwp->suspended);
4594 }
4595
4596 if (debug_threads)
4597 debug_printf ("Done stopping all threads for step-over.\n");
4598
4599 /* Note, we should always reach here with an already adjusted PC,
4600 either by GDB (if we're resuming due to GDB's request), or by our
4601 caller, if we just finished handling an internal breakpoint GDB
4602 shouldn't care about. */
4603 pc = get_pc (lwp);
4604
4605 saved_thread = current_thread;
4606 current_thread = thread;
4607
4608 lwp->bp_reinsert = pc;
4609 uninsert_breakpoints_at (pc);
4610 uninsert_fast_tracepoint_jumps_at (pc);
4611
4612 step = single_step (lwp);
4613
4614 current_thread = saved_thread;
4615
4616 resume_one_lwp (lwp, step, 0, NULL);
4617
4618 /* Require next event from this LWP. */
4619 step_over_bkpt = thread->id;
4620 }
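/* Step-over in a nutshell: stop and suspend all other LWPs, lift the
   breakpoint at PC (and any fast tracepoint jump), let just this LWP
   execute the original instruction - via hardware single-step or
   single-step breakpoints - and record step_over_bkpt so that the
   next event must come from this LWP.  finish_step_over below then
   reinserts the breakpoint and clears that state.  */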
4621
4622 bool
4623 linux_process_target::finish_step_over (lwp_info *lwp)
4624 {
4625 if (lwp->bp_reinsert != 0)
4626 {
4627 struct thread_info *saved_thread = current_thread;
4628
4629 if (debug_threads)
4630 debug_printf ("Finished step over.\n");
4631
4632 current_thread = get_lwp_thread (lwp);
4633
4634 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4635 may be no breakpoint to reinsert there by now. */
4636 reinsert_breakpoints_at (lwp->bp_reinsert);
4637 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4638
4639 lwp->bp_reinsert = 0;
4640
4641 /* Delete any single-step breakpoints. No longer needed. We
4642 don't have to worry about other threads hitting this trap,
4643 and later not being able to explain it, because we were
4644 stepping over a breakpoint, and we hold all threads but
4645 LWP stopped while doing that. */
4646 if (!supports_hardware_single_step ())
4647 {
4648 gdb_assert (has_single_step_breakpoints (current_thread));
4649 delete_single_step_breakpoints (current_thread);
4650 }
4651
4652 step_over_bkpt = null_ptid;
4653 current_thread = saved_thread;
4654 return true;
4655 }
4656 else
4657 return false;
4658 }
4659
4660 void
4661 linux_process_target::complete_ongoing_step_over ()
4662 {
4663 if (step_over_bkpt != null_ptid)
4664 {
4665 struct lwp_info *lwp;
4666 int wstat;
4667 int ret;
4668
4669 if (debug_threads)
4670 debug_printf ("detach: step over in progress, finish it first\n");
4671
4672 /* Passing NULL_PTID as filter indicates we want all events to
4673 be left pending. Eventually this returns when there are no
4674 unwaited-for children left. */
4675 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4676 __WALL);
4677 gdb_assert (ret == -1);
4678
4679 lwp = find_lwp_pid (step_over_bkpt);
4680 if (lwp != NULL)
4681 {
4682 finish_step_over (lwp);
4683
4684 /* If we got our step SIGTRAP, don't leave it pending,
4685 otherwise we would report it to GDB as a spurious
4686 SIGTRAP. */
4687 gdb_assert (lwp->status_pending_p);
4688 if (WIFSTOPPED (lwp->status_pending)
4689 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4690 {
4691 thread_info *thread = get_lwp_thread (lwp);
4692 if (thread->last_resume_kind != resume_step)
4693 {
4694 if (debug_threads)
4695 debug_printf ("detach: discard step-over SIGTRAP\n");
4696
4697 lwp->status_pending_p = 0;
4698 lwp->status_pending = 0;
4699 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4700 }
4701 else
4702 {
4703 if (debug_threads)
4704 debug_printf ("detach: resume_step, "
4705 "not discarding step-over SIGTRAP\n");
4706 }
4707 }
4708 }
4709 step_over_bkpt = null_ptid;
4710 unsuspend_all_lwps (lwp);
4711 }
4712 }
4713
4714 void
4715 linux_process_target::resume_one_thread (thread_info *thread,
4716 bool leave_all_stopped)
4717 {
4718 struct lwp_info *lwp = get_thread_lwp (thread);
4719 int leave_pending;
4720
4721 if (lwp->resume == NULL)
4722 return;
4723
4724 if (lwp->resume->kind == resume_stop)
4725 {
4726 if (debug_threads)
4727 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4728
4729 if (!lwp->stopped)
4730 {
4731 if (debug_threads)
4732 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4733
4734 /* Stop the thread, and wait for the event asynchronously,
4735 through the event loop. */
4736 send_sigstop (lwp);
4737 }
4738 else
4739 {
4740 if (debug_threads)
4741 debug_printf ("already stopped LWP %ld\n",
4742 lwpid_of (thread));
4743
4744 /* The LWP may have been stopped in an internal event that
4745 was not meant to be notified back to GDB (e.g., gdbserver
4746 breakpoint), so we should be reporting a stop event in
4747 this case too. */
4748
4749 /* If the thread already has a pending SIGSTOP, this is a
4750 no-op. Otherwise, something later will presumably resume
4751 the thread and this will cause it to cancel any pending
4752 operation, due to last_resume_kind == resume_stop. If
4753 the thread already has a pending status to report, we
4754 will still report it the next time we wait - see
4755 status_pending_p_callback. */
4756
4757 /* If we already have a pending signal to report, then
4758 there's no need to queue a SIGSTOP, as this means we're
4759 midway through moving the LWP out of the jumppad, and we
4760 will report the pending signal as soon as that is
4761 finished. */
4762 if (lwp->pending_signals_to_report.empty ())
4763 send_sigstop (lwp);
4764 }
4765
4766 /* For stop requests, we're done. */
4767 lwp->resume = NULL;
4768 thread->last_status.set_ignore ();
4769 return;
4770 }
4771
4772 /* If this thread, which is about to be resumed, has a pending status,
4773 then don't resume it - we can just report the pending status.
4774 Likewise if it is suspended, because e.g., another thread is
4775 stepping past a breakpoint. Make sure to queue any signals that
4776 would otherwise be sent. In all-stop mode, we make this decision
4777 based on whether *any* thread has a pending status. If there's a
4778 thread that needs the step-over-breakpoint dance, then don't
4779 resume any other thread but that particular one. */
4780 leave_pending = (lwp->suspended
4781 || lwp->status_pending_p
4782 || leave_all_stopped);
4783
4784 /* If we have a new signal, enqueue the signal. */
4785 if (lwp->resume->sig != 0)
4786 {
4787 siginfo_t info, *info_p;
4788
4789 /* If this is the same signal we were previously stopped by,
4790 make sure to queue its siginfo. */
4791 if (WIFSTOPPED (lwp->last_status)
4792 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4793 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4794 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4795 info_p = &info;
4796 else
4797 info_p = NULL;
4798
4799 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4800 }
4801
4802 if (!leave_pending)
4803 {
4804 if (debug_threads)
4805 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4806
4807 proceed_one_lwp (thread, NULL);
4808 }
4809 else
4810 {
4811 if (debug_threads)
4812 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4813 }
4814
4815 thread->last_status.set_ignore ();
4816 lwp->resume = NULL;
4817 }
4818
4819 void
4820 linux_process_target::resume (thread_resume *resume_info, size_t n)
4821 {
4822 struct thread_info *need_step_over = NULL;
4823
4824 if (debug_threads)
4825 {
4826 debug_enter ();
4827 debug_printf ("linux_resume:\n");
4828 }
4829
4830 for_each_thread ([&] (thread_info *thread)
4831 {
4832 linux_set_resume_request (thread, resume_info, n);
4833 });
4834
4835 /* If there is a thread which would otherwise be resumed, which has
4836 a pending status, then don't resume any threads - we can just
4837 report the pending status. Make sure to queue any signals that
4838 would otherwise be sent. In non-stop mode, we'll apply this
4839 logic to each thread individually. We consume all pending events
4840 before considering starting a step-over (in all-stop). */
4841 bool any_pending = false;
4842 if (!non_stop)
4843 any_pending = find_thread ([this] (thread_info *thread)
4844 {
4845 return resume_status_pending (thread);
4846 }) != nullptr;
4847
4848 /* If there is a thread which would otherwise be resumed, which is
4849 stopped at a breakpoint that needs stepping over, then don't
4850 resume any threads - have it step over the breakpoint with all
4851 other threads stopped, then resume all threads again. Make sure
4852 to queue any signals that would otherwise be delivered or
4853 queued. */
4854 if (!any_pending && low_supports_breakpoints ())
4855 need_step_over = find_thread ([this] (thread_info *thread)
4856 {
4857 return thread_needs_step_over (thread);
4858 });
4859
4860 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4861
4862 if (debug_threads)
4863 {
4864 if (need_step_over != NULL)
4865 debug_printf ("Not resuming all, need step over\n");
4866 else if (any_pending)
4867 debug_printf ("Not resuming, all-stop and found "
4868 "an LWP with pending status\n");
4869 else
4870 debug_printf ("Resuming, no pending status or step over needed\n");
4871 }
4872
4873 /* Even if we're leaving threads stopped, queue all signals we'd
4874 otherwise deliver. */
4875 for_each_thread ([&] (thread_info *thread)
4876 {
4877 resume_one_thread (thread, leave_all_stopped);
4878 });
4879
4880 if (need_step_over)
4881 start_step_over (get_thread_lwp (need_step_over));
4882
4883 if (debug_threads)
4884 {
4885 debug_printf ("linux_resume done\n");
4886 debug_exit ();
4887 }
4888
4889 /* We may have events that were pending that can/should be sent to
4890 the client now. Trigger a linux_wait call. */
4891 if (target_is_async_p ())
4892 async_file_mark ();
4893 }
4894
4895 void
4896 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4897 {
4898 struct lwp_info *lwp = get_thread_lwp (thread);
4899 int step;
4900
4901 if (lwp == except)
4902 return;
4903
4904 if (debug_threads)
4905 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4906
4907 if (!lwp->stopped)
4908 {
4909 if (debug_threads)
4910 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4911 return;
4912 }
4913
4914 if (thread->last_resume_kind == resume_stop
4915 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4916 {
4917 if (debug_threads)
4918 debug_printf (" client wants LWP %ld to remain stopped\n",
4919 lwpid_of (thread));
4920 return;
4921 }
4922
4923 if (lwp->status_pending_p)
4924 {
4925 if (debug_threads)
4926 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4927 lwpid_of (thread));
4928 return;
4929 }
4930
4931 gdb_assert (lwp->suspended >= 0);
4932
4933 if (lwp->suspended)
4934 {
4935 if (debug_threads)
4936 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4937 return;
4938 }
4939
4940 if (thread->last_resume_kind == resume_stop
4941 && lwp->pending_signals_to_report.empty ()
4942 && (lwp->collecting_fast_tracepoint
4943 == fast_tpoint_collect_result::not_collecting))
4944 {
4945 /* We haven't reported this LWP as stopped yet (otherwise, the
4946 last_status.kind check above would catch it, and we wouldn't
4947 reach here). This LWP may have been momentarily paused by a
4948 stop_all_lwps call while handling, for example, another LWP's
4949 step-over. In that case, the pending expected SIGSTOP signal
4950 that was queued at vCont;t handling time will have already
4951 been consumed by wait_for_sigstop, and so we need to requeue
4952 another one here. Note that if the LWP already has a SIGSTOP
4953 pending, this is a no-op. */
4954
4955 if (debug_threads)
4956 debug_printf ("Client wants LWP %ld to stop. "
4957 "Making sure it has a SIGSTOP pending\n",
4958 lwpid_of (thread));
4959
4960 send_sigstop (lwp);
4961 }
4962
4963 if (thread->last_resume_kind == resume_step)
4964 {
4965 if (debug_threads)
4966 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4967 lwpid_of (thread));
4968
4969 /* If resume_step is requested by GDB, install single-step
4970 breakpoints when the thread is about to be actually resumed if
4971 the single-step breakpoints weren't removed. */
4972 if (supports_software_single_step ()
4973 && !has_single_step_breakpoints (thread))
4974 install_software_single_step_breakpoints (lwp);
4975
4976 step = maybe_hw_step (thread);
4977 }
4978 else if (lwp->bp_reinsert != 0)
4979 {
4980 if (debug_threads)
4981 debug_printf (" stepping LWP %ld, reinsert set\n",
4982 lwpid_of (thread));
4983
4984 step = maybe_hw_step (thread);
4985 }
4986 else
4987 step = 0;
4988
4989 resume_one_lwp (lwp, step, 0, NULL);
4990 }
4991
4992 void
4993 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4994 lwp_info *except)
4995 {
4996 struct lwp_info *lwp = get_thread_lwp (thread);
4997
4998 if (lwp == except)
4999 return;
5000
5001 lwp_suspended_decr (lwp);
5002
5003 proceed_one_lwp (thread, except);
5004 }
5005
5006 void
5007 linux_process_target::proceed_all_lwps ()
5008 {
5009 struct thread_info *need_step_over;
5010
5011 /* If there is a thread which would otherwise be resumed, which is
5012 stopped at a breakpoint that needs stepping over, then don't
5013 resume any threads - have it step over the breakpoint with all
5014 other threads stopped, then resume all threads again. */
5015
5016 if (low_supports_breakpoints ())
5017 {
5018 need_step_over = find_thread ([this] (thread_info *thread)
5019 {
5020 return thread_needs_step_over (thread);
5021 });
5022
5023 if (need_step_over != NULL)
5024 {
5025 if (debug_threads)
5026 debug_printf ("proceed_all_lwps: found "
5027 "thread %ld needing a step-over\n",
5028 lwpid_of (need_step_over));
5029
5030 start_step_over (get_thread_lwp (need_step_over));
5031 return;
5032 }
5033 }
5034
5035 if (debug_threads)
5036 debug_printf ("Proceeding, no step-over needed\n");
5037
5038 for_each_thread ([this] (thread_info *thread)
5039 {
5040 proceed_one_lwp (thread, NULL);
5041 });
5042 }
5043
5044 void
5045 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5046 {
5047 if (debug_threads)
5048 {
5049 debug_enter ();
5050 if (except)
5051 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5052 lwpid_of (get_lwp_thread (except)));
5053 else
5054 debug_printf ("unstopping all lwps\n");
5055 }
5056
5057 if (unsuspend)
5058 for_each_thread ([&] (thread_info *thread)
5059 {
5060 unsuspend_and_proceed_one_lwp (thread, except);
5061 });
5062 else
5063 for_each_thread ([&] (thread_info *thread)
5064 {
5065 proceed_one_lwp (thread, except);
5066 });
5067
5068 if (debug_threads)
5069 {
5070 debug_printf ("unstop_all_lwps done\n");
5071 debug_exit ();
5072 }
5073 }
5074
5075
5076 #ifdef HAVE_LINUX_REGSETS
5077
5078 #define use_linux_regsets 1
5079
5080 /* Returns true if REGSET has been disabled. */
5081
5082 static int
5083 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5084 {
5085 return (info->disabled_regsets != NULL
5086 && info->disabled_regsets[regset - info->regsets]);
5087 }
5088
5089 /* Disable REGSET. */
5090
5091 static void
5092 disable_regset (struct regsets_info *info, struct regset_info *regset)
5093 {
5094 int dr_offset;
5095
5096 dr_offset = regset - info->regsets;
5097 if (info->disabled_regsets == NULL)
5098 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5099 info->disabled_regsets[dr_offset] = 1;
5100 }
5101
5102 static int
5103 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5104 struct regcache *regcache)
5105 {
5106 struct regset_info *regset;
5107 int saw_general_regs = 0;
5108 int pid;
5109 struct iovec iov;
5110
5111 pid = lwpid_of (current_thread);
5112 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5113 {
5114 void *buf, *data;
5115 int nt_type, res;
5116
5117 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5118 continue;
5119
5120 buf = xmalloc (regset->size);
5121
5122 nt_type = regset->nt_type;
5123 if (nt_type)
5124 {
5125 iov.iov_base = buf;
5126 iov.iov_len = regset->size;
5127 data = (void *) &iov;
5128 }
5129 else
5130 data = buf;
5131
5132 #ifndef __sparc__
5133 res = ptrace (regset->get_request, pid,
5134 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5135 #else
5136 res = ptrace (regset->get_request, pid, data, nt_type);
5137 #endif
5138 if (res < 0)
5139 {
5140 if (errno == EIO
5141 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5142 {
5143 /* If we get EIO on a regset, or an EINVAL and the regset is
5144 optional, do not try it again for this process mode. */
5145 disable_regset (regsets_info, regset);
5146 }
5147 else if (errno == ENODATA)
5148 {
5149 /* ENODATA may be returned if the regset is currently
5150 not "active". This can happen in normal operation,
5151 so suppress the warning in this case. */
5152 }
5153 else if (errno == ESRCH)
5154 {
5155 /* At this point, ESRCH should mean the process is
5156 already gone, in which case we simply ignore attempts
5157 to read its registers. */
5158 }
5159 else
5160 {
5161 char s[256];
5162 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5163 pid);
5164 perror (s);
5165 }
5166 }
5167 else
5168 {
5169 if (regset->type == GENERAL_REGS)
5170 saw_general_regs = 1;
5171 regset->store_function (regcache, buf);
5172 }
5173 free (buf);
5174 }
5175 if (saw_general_regs)
5176 return 0;
5177 else
5178 return 1;
5179 }
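/* Note on the iovec dance above: regsets with a non-zero NT_* note
   type (e.g. NT_PRSTATUS, NT_FPREGSET) are transferred with
   PTRACE_GETREGSET/PTRACE_SETREGSET, whose third argument is the
   note type and whose fourth describes the buffer.  Roughly:

     struct iovec iov = { buf, regset->size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   Legacy regsets (nt_type == 0, e.g. PTRACE_GETREGS) instead take
   the buffer pointer directly as the fourth argument.  */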
5180
5181 static int
5182 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5183 struct regcache *regcache)
5184 {
5185 struct regset_info *regset;
5186 int saw_general_regs = 0;
5187 int pid;
5188 struct iovec iov;
5189
5190 pid = lwpid_of (current_thread);
5191 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5192 {
5193 void *buf, *data;
5194 int nt_type, res;
5195
5196 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5197 || regset->fill_function == NULL)
5198 continue;
5199
5200 buf = xmalloc (regset->size);
5201
5202 /* First fill the buffer with the current register set contents,
5203 in case there are any items in the kernel's regset that are
5204 not in gdbserver's regcache. */
5205
5206 nt_type = regset->nt_type;
5207 if (nt_type)
5208 {
5209 iov.iov_base = buf;
5210 iov.iov_len = regset->size;
5211 data = (void *) &iov;
5212 }
5213 else
5214 data = buf;
5215
5216 #ifndef __sparc__
5217 res = ptrace (regset->get_request, pid,
5218 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5219 #else
5220 res = ptrace (regset->get_request, pid, data, nt_type);
5221 #endif
5222
5223 if (res == 0)
5224 {
5225 /* Then overlay our cached registers on that. */
5226 regset->fill_function (regcache, buf);
5227
5228 /* Only now do we write the register set. */
5229 #ifndef __sparc__
5230 res = ptrace (regset->set_request, pid,
5231 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5232 #else
5233 res = ptrace (regset->set_request, pid, data, nt_type);
5234 #endif
5235 }
5236
5237 if (res < 0)
5238 {
5239 if (errno == EIO
5240 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5241 {
5242 /* If we get EIO on a regset, or an EINVAL and the regset is
5243 optional, do not try it again for this process mode. */
5244 disable_regset (regsets_info, regset);
5245 }
5246 else if (errno == ESRCH)
5247 {
5248 /* At this point, ESRCH should mean the process is
5249 already gone, in which case we simply ignore attempts
5250 to change its registers. See also the related
5251 comment in resume_one_lwp. */
5252 free (buf);
5253 return 0;
5254 }
5255 else
5256 {
5257 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5258 }
5259 }
5260 else if (regset->type == GENERAL_REGS)
5261 saw_general_regs = 1;
5262 free (buf);
5263 }
5264 if (saw_general_regs)
5265 return 0;
5266 else
5267 return 1;
5268 }
5269
5270 #else /* !HAVE_LINUX_REGSETS */
5271
5272 #define use_linux_regsets 0
5273 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5274 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5275
5276 #endif
5277
5278 /* Return 1 if register REGNO is supported by one of the regset ptrace
5279 calls or 0 if it has to be transferred individually. */
5280
5281 static int
5282 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5283 {
5284 unsigned char mask = 1 << (regno % 8);
5285 size_t index = regno / 8;
5286
5287 return (use_linux_regsets
5288 && (regs_info->regset_bitmap == NULL
5289 || (regs_info->regset_bitmap[index] & mask) != 0));
5290 }
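/* For example (illustrative only): for regno == 10, index is 1 and
   mask is 1 << 2, so the lookup tests bit 2 of regset_bitmap[1].  A
   NULL bitmap means every register is covered by some regset.  */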
5291
5292 #ifdef HAVE_LINUX_USRREGS
5293
5294 static int
5295 register_addr (const struct usrregs_info *usrregs, int regnum)
5296 {
5297 int addr;
5298
5299 if (regnum < 0 || regnum >= usrregs->num_regs)
5300 error ("Invalid register number %d.", regnum);
5301
5302 addr = usrregs->regmap[regnum];
5303
5304 return addr;
5305 }
5306
5307
5308 void
5309 linux_process_target::fetch_register (const usrregs_info *usrregs,
5310 regcache *regcache, int regno)
5311 {
5312 CORE_ADDR regaddr;
5313 int i, size;
5314 char *buf;
5315 int pid;
5316
5317 if (regno >= usrregs->num_regs)
5318 return;
5319 if (low_cannot_fetch_register (regno))
5320 return;
5321
5322 regaddr = register_addr (usrregs, regno);
5323 if (regaddr == -1)
5324 return;
5325
5326 size = ((register_size (regcache->tdesc, regno)
5327 + sizeof (PTRACE_XFER_TYPE) - 1)
5328 & -sizeof (PTRACE_XFER_TYPE));
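/* The expression above rounds the register size up to a whole number
   of ptrace words.  Worked example, assuming an 8-byte
   PTRACE_XFER_TYPE: a 10-byte register gives (10 + 7) & -8 == 16,
   so the peek loop below always transfers full words.  */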
5329 buf = (char *) alloca (size);
5330
5331 pid = lwpid_of (current_thread);
5332 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5333 {
5334 errno = 0;
5335 *(PTRACE_XFER_TYPE *) (buf + i) =
5336 ptrace (PTRACE_PEEKUSER, pid,
5337 /* Coerce to a uintptr_t first to avoid potential gcc warning
5338 of coercing an 8 byte integer to a 4 byte pointer. */
5339 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5340 regaddr += sizeof (PTRACE_XFER_TYPE);
5341 if (errno != 0)
5342 {
5343 /* Mark register REGNO unavailable. */
5344 supply_register (regcache, regno, NULL);
5345 return;
5346 }
5347 }
5348
5349 low_supply_ptrace_register (regcache, regno, buf);
5350 }
5351
5352 void
5353 linux_process_target::store_register (const usrregs_info *usrregs,
5354 regcache *regcache, int regno)
5355 {
5356 CORE_ADDR regaddr;
5357 int i, size;
5358 char *buf;
5359 int pid;
5360
5361 if (regno >= usrregs->num_regs)
5362 return;
5363 if (low_cannot_store_register (regno))
5364 return;
5365
5366 regaddr = register_addr (usrregs, regno);
5367 if (regaddr == -1)
5368 return;
5369
5370 size = ((register_size (regcache->tdesc, regno)
5371 + sizeof (PTRACE_XFER_TYPE) - 1)
5372 & -sizeof (PTRACE_XFER_TYPE));
5373 buf = (char *) alloca (size);
5374 memset (buf, 0, size);
5375
5376 low_collect_ptrace_register (regcache, regno, buf);
5377
5378 pid = lwpid_of (current_thread);
5379 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5380 {
5381 errno = 0;
5382 ptrace (PTRACE_POKEUSER, pid,
5383 /* Coerce to a uintptr_t first to avoid potential gcc warning
5384 about coercing an 8 byte integer to a 4 byte pointer. */
5385 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5386 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5387 if (errno != 0)
5388 {
5389 /* At this point, ESRCH should mean the process is
5390 already gone, in which case we simply ignore attempts
5391 to change its registers. See also the related
5392 comment in resume_one_lwp. */
5393 if (errno == ESRCH)
5394 return;
5395
5397 if (!low_cannot_store_register (regno))
5398 error ("writing register %d: %s", regno, safe_strerror (errno));
5399 }
5400 regaddr += sizeof (PTRACE_XFER_TYPE);
5401 }
5402 }
5403 #endif /* HAVE_LINUX_USRREGS */
5404
5405 void
5406 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5407 int regno, char *buf)
5408 {
5409 collect_register (regcache, regno, buf);
5410 }
5411
5412 void
5413 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5414 int regno, const char *buf)
5415 {
5416 supply_register (regcache, regno, buf);
5417 }
5418
5419 void
5420 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5421 regcache *regcache,
5422 int regno, int all)
5423 {
5424 #ifdef HAVE_LINUX_USRREGS
5425 struct usrregs_info *usr = regs_info->usrregs;
5426
5427 if (regno == -1)
5428 {
5429 for (regno = 0; regno < usr->num_regs; regno++)
5430 if (all || !linux_register_in_regsets (regs_info, regno))
5431 fetch_register (usr, regcache, regno);
5432 }
5433 else
5434 fetch_register (usr, regcache, regno);
5435 #endif
5436 }
5437
5438 void
5439 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5440 regcache *regcache,
5441 int regno, int all)
5442 {
5443 #ifdef HAVE_LINUX_USRREGS
5444 struct usrregs_info *usr = regs_info->usrregs;
5445
5446 if (regno == -1)
5447 {
5448 for (regno = 0; regno < usr->num_regs; regno++)
5449 if (all || !linux_register_in_regsets (regs_info, regno))
5450 store_register (usr, regcache, regno);
5451 }
5452 else
5453 store_register (usr, regcache, regno);
5454 #endif
5455 }
5456
5457 void
5458 linux_process_target::fetch_registers (regcache *regcache, int regno)
5459 {
5460 int use_regsets;
5461 int all = 0;
5462 const regs_info *regs_info = get_regs_info ();
5463
5464 if (regno == -1)
5465 {
5466 if (regs_info->usrregs != NULL)
5467 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5468 low_fetch_register (regcache, regno);
5469
5470 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5471 if (regs_info->usrregs != NULL)
5472 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5473 }
5474 else
5475 {
5476 if (low_fetch_register (regcache, regno))
5477 return;
5478
5479 use_regsets = linux_register_in_regsets (regs_info, regno);
5480 if (use_regsets)
5481 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5482 regcache);
5483 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5484 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5485 }
5486 }
5487
5488 void
5489 linux_process_target::store_registers (regcache *regcache, int regno)
5490 {
5491 int use_regsets;
5492 int all = 0;
5493 const regs_info *regs_info = get_regs_info ();
5494
5495 if (regno == -1)
5496 {
5497 all = regsets_store_inferior_registers (regs_info->regsets_info,
5498 regcache);
5499 if (regs_info->usrregs != NULL)
5500 usr_store_inferior_registers (regs_info, regcache, regno, all);
5501 }
5502 else
5503 {
5504 use_regsets = linux_register_in_regsets (regs_info, regno);
5505 if (use_regsets)
5506 all = regsets_store_inferior_registers (regs_info->regsets_info,
5507 regcache);
5508 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5509 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5510 }
5511 }
5512
5513 bool
5514 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5515 {
5516 return false;
5517 }
5518
5519 /* A wrapper for the read_memory target op. */
5520
5521 static int
5522 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5523 {
5524 return the_target->read_memory (memaddr, myaddr, len);
5525 }
5526
5527 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5528 to debugger memory starting at MYADDR. */
5529
5530 int
5531 linux_process_target::read_memory (CORE_ADDR memaddr,
5532 unsigned char *myaddr, int len)
5533 {
5534 int pid = lwpid_of (current_thread);
5535 PTRACE_XFER_TYPE *buffer;
5536 CORE_ADDR addr;
5537 int count;
5538 char filename[64];
5539 int i;
5540 int ret;
5541 int fd;
5542
5543 /* Try using /proc. Don't bother for one word. */
5544 if (len >= 3 * sizeof (long))
5545 {
5546 int bytes;
5547
5548 /* We could keep this file open and cache it - possibly one per
5549 thread. That requires some juggling, but is even faster. */
5550 sprintf (filename, "/proc/%d/mem", pid);
5551 fd = open (filename, O_RDONLY | O_LARGEFILE);
5552 if (fd == -1)
5553 goto no_proc;
5554
5555 /* If pread64 is available, use it. It's faster if the kernel
5556 supports it (only one syscall), and it's 64-bit safe even on
5557 32-bit platforms (for instance, SPARC debugging a SPARC64
5558 application). */
5559 #ifdef HAVE_PREAD64
5560 bytes = pread64 (fd, myaddr, len, memaddr);
5561 #else
5562 bytes = -1;
5563 if (lseek (fd, memaddr, SEEK_SET) != -1)
5564 bytes = read (fd, myaddr, len);
5565 #endif
5566
5567 close (fd);
5568 if (bytes == len)
5569 return 0;
5570
5571 /* Some data was read, we'll try to get the rest with ptrace. */
5572 if (bytes > 0)
5573 {
5574 memaddr += bytes;
5575 myaddr += bytes;
5576 len -= bytes;
5577 }
5578 }
5579
5580 no_proc:
5581 /* Round starting address down to longword boundary. */
5582 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5583 /* Round ending address up; get number of longwords that makes. */
5584 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5585 / sizeof (PTRACE_XFER_TYPE));
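/* Worked example of the rounding above, assuming 8-byte words:
   memaddr == 0x1005 and len == 10 give addr == 0x1000 and
   count == 2, i.e. the two words covering 0x1000..0x100f, which
   fully contain the requested 0x1005..0x100e range.  */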
5586 /* Allocate buffer of that many longwords. */
5587 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5588
5589 /* Read all the longwords. */
5590 errno = 0;
5591 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5592 {
5593 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5594 about coercing an 8 byte integer to a 4 byte pointer. */
5595 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5596 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5597 (PTRACE_TYPE_ARG4) 0);
5598 if (errno)
5599 break;
5600 }
5601 ret = errno;
5602
5603 /* Copy appropriate bytes out of the buffer. */
5604 if (i > 0)
5605 {
5606 i *= sizeof (PTRACE_XFER_TYPE);
5607 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5608 memcpy (myaddr,
5609 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5610 i < len ? i : len);
5611 }
5612
5613 return ret;
5614 }
5615
5616 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5617 memory at MEMADDR. On failure (cannot write to the inferior)
5618 returns the value of errno. Always succeeds if LEN is zero. */
5619
5620 int
5621 linux_process_target::write_memory (CORE_ADDR memaddr,
5622 const unsigned char *myaddr, int len)
5623 {
5624 int i;
5625 /* Round starting address down to longword boundary. */
5626 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5627 /* Round ending address up; get number of longwords that makes. */
5628 int count
5629 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5630 / sizeof (PTRACE_XFER_TYPE);
5631
5632 /* Allocate buffer of that many longwords. */
5633 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5634
5635 int pid = lwpid_of (current_thread);
5636
5637 if (len == 0)
5638 {
5639 /* Zero length write always succeeds. */
5640 return 0;
5641 }
5642
5643 if (debug_threads)
5644 {
5645 /* Dump up to four bytes. */
5646 char str[4 * 2 + 1];
5647 char *p = str;
5648 int dump = len < 4 ? len : 4;
5649
5650 for (i = 0; i < dump; i++)
5651 {
5652 sprintf (p, "%02x", myaddr[i]);
5653 p += 2;
5654 }
5655 *p = '\0';
5656
5657 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5658 str, (long) memaddr, pid);
5659 }
5660
5661 /* Fill start and end extra bytes of buffer with existing memory data. */
5662
5663 errno = 0;
5664 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5665 about coercing an 8 byte integer to a 4 byte pointer. */
5666 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5667 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5668 (PTRACE_TYPE_ARG4) 0);
5669 if (errno)
5670 return errno;
5671
5672 if (count > 1)
5673 {
5674 errno = 0;
5675 buffer[count - 1]
5676 = ptrace (PTRACE_PEEKTEXT, pid,
5677 /* Coerce to a uintptr_t first to avoid potential gcc warning
5678 about coercing an 8 byte integer to a 4 byte pointer. */
5679 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5680 * sizeof (PTRACE_XFER_TYPE)),
5681 (PTRACE_TYPE_ARG4) 0);
5682 if (errno)
5683 return errno;
5684 }
5685
5686 /* Copy data to be written over corresponding part of buffer. */
5687
5688 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5689 myaddr, len);
5690
5691 /* Write the entire buffer. */
5692
5693 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5694 {
5695 errno = 0;
5696 ptrace (PTRACE_POKETEXT, pid,
5697 /* Coerce to a uintptr_t first to avoid potential gcc warning
5698 about coercing an 8 byte integer to a 4 byte pointer. */
5699 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5700 (PTRACE_TYPE_ARG4) buffer[i]);
5701 if (errno)
5702 return errno;
5703 }
5704
5705 return 0;
5706 }
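/* For illustration only, a minimal sketch of the same read-modify-write
   idea applied to a single byte.  The helper name poke_byte_sketch is
   made up for this example and is not part of gdbserver; PID is assumed
   to name a stopped, traced LWP.  */
#if 0
static int
poke_byte_sketch (int pid, CORE_ADDR addr, unsigned char byte)
{
  /* Peek the longword containing ADDR, patch one byte in it, and
     poke the longword back.  */
  CORE_ADDR aligned = addr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);

  errno = 0;
  PTRACE_XFER_TYPE word = ptrace (PTRACE_PEEKTEXT, pid,
                                  (PTRACE_TYPE_ARG3) (uintptr_t) aligned,
                                  (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  ((unsigned char *) &word)[addr - aligned] = byte;

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (PTRACE_TYPE_ARG3) (uintptr_t) aligned,
          (PTRACE_TYPE_ARG4) word);
  return errno;
}
#endif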
5707
5708 void
5709 linux_process_target::look_up_symbols ()
5710 {
5711 #ifdef USE_THREAD_DB
5712 struct process_info *proc = current_process ();
5713
5714 if (proc->priv->thread_db != NULL)
5715 return;
5716
5717 thread_db_init ();
5718 #endif
5719 }
5720
5721 void
5722 linux_process_target::request_interrupt ()
5723 {
5724 /* Send a SIGINT to the process group. This acts just as if the
5725 user had typed a ^C on the controlling terminal. */
5726 ::kill (-signal_pid, SIGINT);
5727 }
5728
5729 bool
5730 linux_process_target::supports_read_auxv ()
5731 {
5732 return true;
5733 }
5734
5735 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5736 to debugger memory starting at MYADDR. */
5737
5738 int
5739 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5740 unsigned int len)
5741 {
5742 char filename[PATH_MAX];
5743 int fd, n;
5744 int pid = lwpid_of (current_thread);
5745
5746 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5747
5748 fd = open (filename, O_RDONLY);
5749 if (fd < 0)
5750 return -1;
5751
5752 if (offset != (CORE_ADDR) 0
5753 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5754 n = -1;
5755 else
5756 n = read (fd, myaddr, len);
5757
5758 close (fd);
5759
5760 return n;
5761 }
5762
5763 int
5764 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5765 int size, raw_breakpoint *bp)
5766 {
5767 if (type == raw_bkpt_type_sw)
5768 return insert_memory_breakpoint (bp);
5769 else
5770 return low_insert_point (type, addr, size, bp);
5771 }
5772
5773 int
5774 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5775 int size, raw_breakpoint *bp)
5776 {
5777 /* Unsupported (see target.h). */
5778 return 1;
5779 }
5780
5781 int
5782 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5783 int size, raw_breakpoint *bp)
5784 {
5785 if (type == raw_bkpt_type_sw)
5786 return remove_memory_breakpoint (bp);
5787 else
5788 return low_remove_point (type, addr, size, bp);
5789 }
5790
5791 int
5792 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5793 int size, raw_breakpoint *bp)
5794 {
5795 /* Unsupported (see target.h). */
5796 return 1;
5797 }
5798
5799 /* Implement the stopped_by_sw_breakpoint target_ops
5800 method. */
5801
5802 bool
5803 linux_process_target::stopped_by_sw_breakpoint ()
5804 {
5805 struct lwp_info *lwp = get_thread_lwp (current_thread);
5806
5807 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5808 }
5809
5810 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5811 method. */
5812
5813 bool
5814 linux_process_target::supports_stopped_by_sw_breakpoint ()
5815 {
5816 return USE_SIGTRAP_SIGINFO;
5817 }
5818
5819 /* Implement the stopped_by_hw_breakpoint target_ops
5820 method. */
5821
5822 bool
5823 linux_process_target::stopped_by_hw_breakpoint ()
5824 {
5825 struct lwp_info *lwp = get_thread_lwp (current_thread);
5826
5827 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5828 }
5829
5830 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5831 method. */
5832
5833 bool
5834 linux_process_target::supports_stopped_by_hw_breakpoint ()
5835 {
5836 return USE_SIGTRAP_SIGINFO;
5837 }
5838
5839 /* Implement the supports_hardware_single_step target_ops method. */
5840
5841 bool
5842 linux_process_target::supports_hardware_single_step ()
5843 {
5844 return true;
5845 }
5846
5847 bool
5848 linux_process_target::stopped_by_watchpoint ()
5849 {
5850 struct lwp_info *lwp = get_thread_lwp (current_thread);
5851
5852 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5853 }
5854
5855 CORE_ADDR
5856 linux_process_target::stopped_data_address ()
5857 {
5858 struct lwp_info *lwp = get_thread_lwp (current_thread);
5859
5860 return lwp->stopped_data_address;
5861 }
5862
5863 /* This is only used for targets that define PT_TEXT_ADDR,
5864 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5865 target presumably has other ways of acquiring this information,
5866 such as loadmaps. */
5867
5868 bool
5869 linux_process_target::supports_read_offsets ()
5870 {
5871 #ifdef SUPPORTS_READ_OFFSETS
5872 return true;
5873 #else
5874 return false;
5875 #endif
5876 }
5877
5878 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5879 to tell gdb about. */
5880
5881 int
5882 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5883 {
5884 #ifdef SUPPORTS_READ_OFFSETS
5885 unsigned long text, text_end, data;
5886 int pid = lwpid_of (current_thread);
5887
5888 errno = 0;
5889
5890 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5891 (PTRACE_TYPE_ARG4) 0);
5892 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5893 (PTRACE_TYPE_ARG4) 0);
5894 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5895 (PTRACE_TYPE_ARG4) 0);
5896
5897 if (errno == 0)
5898 {
5899 /* Both text and data offsets produced at compile-time (and so
5900 used by gdb) are relative to the beginning of the program,
5901 with the data segment immediately following the text segment.
5902 However, the actual runtime layout in memory may put the data
5903 somewhere else, so when we send gdb a data base-address, we
5904 use the real data base address and subtract the compile-time
5905 data base-address from it (which is just the length of the
5906 text segment). BSS immediately follows data in both
5907 cases. */
5908 *text_p = text;
5909 *data_p = data - (text_end - text);
5910
5911 return 1;
5912 }
5913 return 0;
5914 #else
5915 gdb_assert_not_reached ("target op read_offsets not supported");
5916 #endif
5917 }
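/* A worked example of the computation above, with made-up addresses:
   if the program was linked with text at offset 0 and length 0x4000
   (so the compile-time data base is 0x4000), and at runtime
   PT_TEXT_ADDR == 0x10000, PT_TEXT_END_ADDR == 0x14000 and
   PT_DATA_ADDR == 0x20000, then *TEXT_P == 0x10000 and *DATA_P ==
   0x20000 - 0x4000 == 0x1c000.  Adding that data base-address to the
   compile-time data address 0x4000 recovers the real runtime data
   base 0x20000.  */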
5918
5919 bool
5920 linux_process_target::supports_get_tls_address ()
5921 {
5922 #ifdef USE_THREAD_DB
5923 return true;
5924 #else
5925 return false;
5926 #endif
5927 }
5928
5929 int
5930 linux_process_target::get_tls_address (thread_info *thread,
5931 CORE_ADDR offset,
5932 CORE_ADDR load_module,
5933 CORE_ADDR *address)
5934 {
5935 #ifdef USE_THREAD_DB
5936 return thread_db_get_tls_address (thread, offset, load_module, address);
5937 #else
5938 return -1;
5939 #endif
5940 }
5941
5942 bool
5943 linux_process_target::supports_qxfer_osdata ()
5944 {
5945 return true;
5946 }
5947
5948 int
5949 linux_process_target::qxfer_osdata (const char *annex,
5950 unsigned char *readbuf,
5951 unsigned const char *writebuf,
5952 CORE_ADDR offset, int len)
5953 {
5954 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5955 }
5956
5957 void
5958 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5959 gdb_byte *inf_siginfo, int direction)
5960 {
5961 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5962
5963 /* If there was no callback, or the callback didn't do anything,
5964 then just do a straight memcpy. */
5965 if (!done)
5966 {
5967 if (direction == 1)
5968 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5969 else
5970 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5971 }
5972 }
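/* As a concrete case of what low_siginfo_fixup deals with: on x86-64,
   a 32-bit inferior's siginfo_t uses 4-byte pointer and long fields
   (si_addr, si_band, ...), whereas the 64-bit ptrace view uses 8-byte
   fields, so the x86 low target converts between the two layouts
   field by field rather than memcpy'ing.  */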
5973
5974 bool
5975 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5976 int direction)
5977 {
5978 return false;
5979 }
5980
5981 bool
5982 linux_process_target::supports_qxfer_siginfo ()
5983 {
5984 return true;
5985 }
5986
5987 int
5988 linux_process_target::qxfer_siginfo (const char *annex,
5989 unsigned char *readbuf,
5990 unsigned const char *writebuf,
5991 CORE_ADDR offset, int len)
5992 {
5993 int pid;
5994 siginfo_t siginfo;
5995 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5996
5997 if (current_thread == NULL)
5998 return -1;
5999
6000 pid = lwpid_of (current_thread);
6001
6002 if (debug_threads)
6003 debug_printf ("%s siginfo for lwp %d.\n",
6004 readbuf != NULL ? "Reading" : "Writing",
6005 pid);
6006
6007 if (offset >= sizeof (siginfo))
6008 return -1;
6009
6010 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6011 return -1;
6012
6013 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6014 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6015 inferior with a 64-bit GDBSERVER should look the same as debugging it
6016 with a 32-bit GDBSERVER, we need to convert it. */
6017 siginfo_fixup (&siginfo, inf_siginfo, 0);
6018
6019 if (offset + len > sizeof (siginfo))
6020 len = sizeof (siginfo) - offset;
6021
6022 if (readbuf != NULL)
6023 memcpy (readbuf, inf_siginfo + offset, len);
6024 else
6025 {
6026 memcpy (inf_siginfo + offset, writebuf, len);
6027
6028 /* Convert back to ptrace layout before flushing it out. */
6029 siginfo_fixup (&siginfo, inf_siginfo, 1);
6030
6031 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6032 return -1;
6033 }
6034
6035 return len;
6036 }
6037
6038 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6039 it lets us notice when children change state; and it serves as the
6040 handler for the sigsuspend in my_waitpid. */
6041
6042 static void
6043 sigchld_handler (int signo)
6044 {
6045 int old_errno = errno;
6046
6047 if (debug_threads)
6048 {
6049 do
6050 {
6051 /* Use the async signal safe debug function. */
6052 if (debug_write ("sigchld_handler\n",
6053 sizeof ("sigchld_handler\n") - 1) < 0)
6054 break; /* just ignore */
6055 } while (0);
6056 }
6057
6058 if (target_is_async_p ())
6059 async_file_mark (); /* trigger a linux_wait */
6060
6061 errno = old_errno;
6062 }
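/* async_file_mark above is an instance of the classic self-pipe
   pattern: about the only async-signal-safe way to wake up an event
   loop from a signal handler is to write a byte to a non-blocking pipe
   that the loop monitors.  A generic sketch of the pattern (the names
   wake_pipe and wake_event_loop are made up for this illustration):  */
#if 0
static int wake_pipe[2];   /* Both ends made O_NONBLOCK at setup.  */

static void
wake_event_loop (void)
{
  char c = '+';

  /* write is async-signal-safe.  If the pipe is full, a wakeup is
     already pending, so a failed write can be ignored.  */
  (void) write (wake_pipe[1], &c, 1);
}
#endif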
6063
6064 bool
6065 linux_process_target::supports_non_stop ()
6066 {
6067 return true;
6068 }
6069
6070 bool
6071 linux_process_target::async (bool enable)
6072 {
6073 bool previous = target_is_async_p ();
6074
6075 if (debug_threads)
6076 debug_printf ("linux_async (%d), previous=%d\n",
6077 enable, previous);
6078
6079 if (previous != enable)
6080 {
6081 sigset_t mask;
6082 sigemptyset (&mask);
6083 sigaddset (&mask, SIGCHLD);
6084
6085 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6086
6087 if (enable)
6088 {
6089 if (pipe (linux_event_pipe) == -1)
6090 {
6091 linux_event_pipe[0] = -1;
6092 linux_event_pipe[1] = -1;
6093 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6094
6095 warning ("creating event pipe failed.");
6096 return previous;
6097 }
6098
6099 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6100 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6101
6102 /* Register the event loop handler. */
6103 add_file_handler (linux_event_pipe[0],
6104 handle_target_event, NULL,
6105 "linux-low");
6106
6107 /* Always trigger a linux_wait. */
6108 async_file_mark ();
6109 }
6110 else
6111 {
6112 delete_file_handler (linux_event_pipe[0]);
6113
6114 close (linux_event_pipe[0]);
6115 close (linux_event_pipe[1]);
6116 linux_event_pipe[0] = -1;
6117 linux_event_pipe[1] = -1;
6118 }
6119
6120 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6121 }
6122
6123 return previous;
6124 }
6125
6126 int
6127 linux_process_target::start_non_stop (bool nonstop)
6128 {
6129 /* Register or unregister from event-loop accordingly. */
6130 target_async (nonstop);
6131
6132 if (target_is_async_p () != (nonstop != false))
6133 return -1;
6134
6135 return 0;
6136 }
6137
6138 bool
6139 linux_process_target::supports_multi_process ()
6140 {
6141 return true;
6142 }
6143
6144 /* Check if fork events are supported. */
6145
6146 bool
6147 linux_process_target::supports_fork_events ()
6148 {
6149 return linux_supports_tracefork ();
6150 }
6151
6152 /* Check if vfork events are supported. */
6153
6154 bool
6155 linux_process_target::supports_vfork_events ()
6156 {
6157 return linux_supports_tracefork ();
6158 }
6159
6160 /* Check if exec events are supported. */
6161
6162 bool
6163 linux_process_target::supports_exec_events ()
6164 {
6165 return linux_supports_traceexec ();
6166 }
6167
6168 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6169 ptrace flags for all inferiors. This is in case the new GDB connection
6170 doesn't support the same set of events that the previous one did. */
6171
6172 void
6173 linux_process_target::handle_new_gdb_connection ()
6174 {
6175 /* Request that all the lwps reset their ptrace options. */
6176 for_each_thread ([] (thread_info *thread)
6177 {
6178 struct lwp_info *lwp = get_thread_lwp (thread);
6179
6180 if (!lwp->stopped)
6181 {
6182 /* Stop the lwp so we can modify its ptrace options. */
6183 lwp->must_set_ptrace_flags = 1;
6184 linux_stop_lwp (lwp);
6185 }
6186 else
6187 {
6188 /* Already stopped; go ahead and set the ptrace options. */
6189 struct process_info *proc = find_process_pid (pid_of (thread));
6190 int options = linux_low_ptrace_options (proc->attached);
6191
6192 linux_enable_event_reporting (lwpid_of (thread), options);
6193 lwp->must_set_ptrace_flags = 0;
6194 }
6195 });
6196 }
6197
6198 int
6199 linux_process_target::handle_monitor_command (char *mon)
6200 {
6201 #ifdef USE_THREAD_DB
6202 return thread_db_handle_monitor_command (mon);
6203 #else
6204 return 0;
6205 #endif
6206 }
6207
6208 int
6209 linux_process_target::core_of_thread (ptid_t ptid)
6210 {
6211 return linux_common_core_of_thread (ptid);
6212 }
6213
6214 bool
6215 linux_process_target::supports_disable_randomization ()
6216 {
6217 return true;
6218 }
6219
6220 bool
6221 linux_process_target::supports_agent ()
6222 {
6223 return true;
6224 }
6225
6226 bool
6227 linux_process_target::supports_range_stepping ()
6228 {
6229 if (supports_software_single_step ())
6230 return true;
6231
6232 return low_supports_range_stepping ();
6233 }
6234
6235 bool
6236 linux_process_target::low_supports_range_stepping ()
6237 {
6238 return false;
6239 }
6240
6241 bool
6242 linux_process_target::supports_pid_to_exec_file ()
6243 {
6244 return true;
6245 }
6246
6247 const char *
6248 linux_process_target::pid_to_exec_file (int pid)
6249 {
6250 return linux_proc_pid_to_exec_file (pid);
6251 }
6252
6253 bool
6254 linux_process_target::supports_multifs ()
6255 {
6256 return true;
6257 }
6258
6259 int
6260 linux_process_target::multifs_open (int pid, const char *filename,
6261 int flags, mode_t mode)
6262 {
6263 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6264 }
6265
6266 int
6267 linux_process_target::multifs_unlink (int pid, const char *filename)
6268 {
6269 return linux_mntns_unlink (pid, filename);
6270 }
6271
6272 ssize_t
6273 linux_process_target::multifs_readlink (int pid, const char *filename,
6274 char *buf, size_t bufsiz)
6275 {
6276 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6277 }
6278
6279 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6280 struct target_loadseg
6281 {
6282 /* Core address to which the segment is mapped. */
6283 Elf32_Addr addr;
6284 /* VMA recorded in the program header. */
6285 Elf32_Addr p_vaddr;
6286 /* Size of this segment in memory. */
6287 Elf32_Word p_memsz;
6288 };
6289
6290 # if defined PT_GETDSBT
6291 struct target_loadmap
6292 {
6293 /* Protocol version number, must be zero. */
6294 Elf32_Word version;
6295 /* Pointer to the DSBT table, its size, and the DSBT index. */
6296 unsigned *dsbt_table;
6297 unsigned dsbt_size, dsbt_index;
6298 /* Number of segments in this map. */
6299 Elf32_Word nsegs;
6300 /* The actual memory map. */
6301 struct target_loadseg segs[/*nsegs*/];
6302 };
6303 # define LINUX_LOADMAP PT_GETDSBT
6304 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6305 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6306 # else
6307 struct target_loadmap
6308 {
6309 /* Protocol version number, must be zero. */
6310 Elf32_Half version;
6311 /* Number of segments in this map. */
6312 Elf32_Half nsegs;
6313 /* The actual memory map. */
6314 struct target_loadseg segs[/*nsegs*/];
6315 };
6316 # define LINUX_LOADMAP PTRACE_GETFDPIC
6317 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6318 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6319 # endif
6320
6321 bool
6322 linux_process_target::supports_read_loadmap ()
6323 {
6324 return true;
6325 }
6326
6327 int
6328 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6329 unsigned char *myaddr, unsigned int len)
6330 {
6331 int pid = lwpid_of (current_thread);
6332 int addr = -1;
6333 struct target_loadmap *data = NULL;
6334 unsigned int actual_length, copy_length;
6335
6336 if (strcmp (annex, "exec") == 0)
6337 addr = (int) LINUX_LOADMAP_EXEC;
6338 else if (strcmp (annex, "interp") == 0)
6339 addr = (int) LINUX_LOADMAP_INTERP;
6340 else
6341 return -1;
6342
6343 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6344 return -1;
6345
6346 if (data == NULL)
6347 return -1;
6348
6349 actual_length = sizeof (struct target_loadmap)
6350 + sizeof (struct target_loadseg) * data->nsegs;
6351
6352 if (offset < 0 || offset > actual_length)
6353 return -1;
6354
6355 copy_length = actual_length - offset < len ? actual_length - offset : len;
6356 memcpy (myaddr, (char *) data + offset, copy_length);
6357 return copy_length;
6358 }
6359 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6360
6361 bool
6362 linux_process_target::supports_catch_syscall ()
6363 {
6364 return (low_supports_catch_syscall ()
6365 && linux_supports_tracesysgood ());
6366 }
6367
6368 bool
6369 linux_process_target::low_supports_catch_syscall ()
6370 {
6371 return false;
6372 }
6373
6374 CORE_ADDR
6375 linux_process_target::read_pc (regcache *regcache)
6376 {
6377 if (!low_supports_breakpoints ())
6378 return 0;
6379
6380 return low_get_pc (regcache);
6381 }
6382
6383 void
6384 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6385 {
6386 gdb_assert (low_supports_breakpoints ());
6387
6388 low_set_pc (regcache, pc);
6389 }
6390
6391 bool
6392 linux_process_target::supports_thread_stopped ()
6393 {
6394 return true;
6395 }
6396
6397 bool
6398 linux_process_target::thread_stopped (thread_info *thread)
6399 {
6400 return get_thread_lwp (thread)->stopped;
6401 }
6402
6403 /* This exposes stop-all-threads functionality to other modules. */
6404
6405 void
6406 linux_process_target::pause_all (bool freeze)
6407 {
6408 stop_all_lwps (freeze, NULL);
6409 }
6410
6411 /* This exposes unstop-all-threads functionality to other gdbserver
6412 modules. */
6413
6414 void
6415 linux_process_target::unpause_all (bool unfreeze)
6416 {
6417 unstop_all_lwps (unfreeze, NULL);
6418 }
6419
6420 int
6421 linux_process_target::prepare_to_access_memory ()
6422 {
6423 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6424 running LWP. */
6425 if (non_stop)
6426 target_pause_all (true);
6427 return 0;
6428 }
6429
6430 void
6431 linux_process_target::done_accessing_memory ()
6432 {
6433 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6434 running LWP. */
6435 if (non_stop)
6436 target_unpause_all (true);
6437 }
6438
6439 /* Extract &phdr and num_phdr from the inferior's auxv. Return 0 on success. */
6440
6441 static int
6442 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6443 CORE_ADDR *phdr_memaddr, int *num_phdr)
6444 {
6445 char filename[PATH_MAX];
6446 int fd;
6447 const int auxv_size = is_elf64
6448 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6449 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6450
6451 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6452
6453 fd = open (filename, O_RDONLY);
6454 if (fd < 0)
6455 return 1;
6456
6457 *phdr_memaddr = 0;
6458 *num_phdr = 0;
6459 while (read (fd, buf, auxv_size) == auxv_size
6460 && (*phdr_memaddr == 0 || *num_phdr == 0))
6461 {
6462 if (is_elf64)
6463 {
6464 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6465
6466 switch (aux->a_type)
6467 {
6468 case AT_PHDR:
6469 *phdr_memaddr = aux->a_un.a_val;
6470 break;
6471 case AT_PHNUM:
6472 *num_phdr = aux->a_un.a_val;
6473 break;
6474 }
6475 }
6476 else
6477 {
6478 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6479
6480 switch (aux->a_type)
6481 {
6482 case AT_PHDR:
6483 *phdr_memaddr = aux->a_un.a_val;
6484 break;
6485 case AT_PHNUM:
6486 *num_phdr = aux->a_un.a_val;
6487 break;
6488 }
6489 }
6490 }
6491
6492 close (fd);
6493
6494 if (*phdr_memaddr == 0 || *num_phdr == 0)
6495 {
6496 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6497 "phdr_memaddr = %ld, phdr_num = %d",
6498 (long) *phdr_memaddr, *num_phdr);
6499 return 2;
6500 }
6501
6502 return 0;
6503 }
6504
6505 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6506
6507 static CORE_ADDR
6508 get_dynamic (const int pid, const int is_elf64)
6509 {
6510 CORE_ADDR phdr_memaddr, relocation;
6511 int num_phdr, i;
6512 unsigned char *phdr_buf;
6513 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6514
6515 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6516 return 0;
6517
6518 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6519 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6520
6521 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6522 return 0;
6523
6524 /* Compute relocation: it is expected to be 0 for "regular" executables,
6525 non-zero for PIE ones. */
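/* For instance, with illustrative addresses: for a PIE loaded at base
   0x555555554000 whose PT_PHDR has p_vaddr 0x40, AT_PHDR reports
   phdr_memaddr == 0x555555554040 and so relocation == 0x555555554000,
   while for a fixed-address executable AT_PHDR equals p_vaddr and
   relocation == 0.  */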
6526 relocation = -1;
6527 for (i = 0; relocation == -1 && i < num_phdr; i++)
6528 if (is_elf64)
6529 {
6530 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6531
6532 if (p->p_type == PT_PHDR)
6533 relocation = phdr_memaddr - p->p_vaddr;
6534 }
6535 else
6536 {
6537 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6538
6539 if (p->p_type == PT_PHDR)
6540 relocation = phdr_memaddr - p->p_vaddr;
6541 }
6542
6543 if (relocation == -1)
6544 {
6545 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6546 all real-world executables, including PIE executables, always have
6547 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6548 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6549 provides DT_DEBUG anyway (fpc binaries are statically linked).
6550
6551 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6552
6553 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6554
6555 return 0;
6556 }
6557
6558 for (i = 0; i < num_phdr; i++)
6559 {
6560 if (is_elf64)
6561 {
6562 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6563
6564 if (p->p_type == PT_DYNAMIC)
6565 return p->p_vaddr + relocation;
6566 }
6567 else
6568 {
6569 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6570
6571 if (p->p_type == PT_DYNAMIC)
6572 return p->p_vaddr + relocation;
6573 }
6574 }
6575
6576 return 0;
6577 }
6578
6579 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6580 can be 0 if the inferior does not yet have the library list initialized.
6581 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6582 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6583
6584 static CORE_ADDR
6585 get_r_debug (const int pid, const int is_elf64)
6586 {
6587 CORE_ADDR dynamic_memaddr;
6588 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6589 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6590 CORE_ADDR map = -1;
6591
6592 dynamic_memaddr = get_dynamic (pid, is_elf64);
6593 if (dynamic_memaddr == 0)
6594 return map;
6595
6596 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6597 {
6598 if (is_elf64)
6599 {
6600 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6601 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6602 union
6603 {
6604 Elf64_Xword map;
6605 unsigned char buf[sizeof (Elf64_Xword)];
6606 }
6607 rld_map;
6608 #endif
6609 #ifdef DT_MIPS_RLD_MAP
6610 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6611 {
6612 if (linux_read_memory (dyn->d_un.d_val,
6613 rld_map.buf, sizeof (rld_map.buf)) == 0)
6614 return rld_map.map;
6615 else
6616 break;
6617 }
6618 #endif /* DT_MIPS_RLD_MAP */
6619 #ifdef DT_MIPS_RLD_MAP_REL
6620 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6621 {
6622 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6623 rld_map.buf, sizeof (rld_map.buf)) == 0)
6624 return rld_map.map;
6625 else
6626 break;
6627 }
6628 #endif /* DT_MIPS_RLD_MAP_REL */
6629
6630 if (dyn->d_tag == DT_DEBUG && map == -1)
6631 map = dyn->d_un.d_val;
6632
6633 if (dyn->d_tag == DT_NULL)
6634 break;
6635 }
6636 else
6637 {
6638 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6639 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6640 union
6641 {
6642 Elf32_Word map;
6643 unsigned char buf[sizeof (Elf32_Word)];
6644 }
6645 rld_map;
6646 #endif
6647 #ifdef DT_MIPS_RLD_MAP
6648 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6649 {
6650 if (linux_read_memory (dyn->d_un.d_val,
6651 rld_map.buf, sizeof (rld_map.buf)) == 0)
6652 return rld_map.map;
6653 else
6654 break;
6655 }
6656 #endif /* DT_MIPS_RLD_MAP */
6657 #ifdef DT_MIPS_RLD_MAP_REL
6658 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6659 {
6660 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6661 rld_map.buf, sizeof (rld_map.buf)) == 0)
6662 return rld_map.map;
6663 else
6664 break;
6665 }
6666 #endif /* DT_MIPS_RLD_MAP_REL */
6667
6668 if (dyn->d_tag == DT_DEBUG && map == -1)
6669 map = dyn->d_un.d_val;
6670
6671 if (dyn->d_tag == DT_NULL)
6672 break;
6673 }
6674
6675 dynamic_memaddr += dyn_size;
6676 }
6677
6678 return map;
6679 }
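/* A note on what the scan above relies on: at process startup the
   dynamic linker stores the address of its r_debug structure into the
   DT_DEBUG entry's d_un.d_val, so a successful scan returns &_r_debug;
   if the linker has not filled it in yet, the entry still reads as 0,
   which is the "can be 0" case documented above.  */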
6680
6681 /* Read one pointer from MEMADDR in the inferior. */
6682
6683 static int
6684 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6685 {
6686 int ret;
6687
6688 /* Go through a union so this works on either big- or little-endian
6689 hosts, when the inferior's pointer size is smaller than the size
6690 of CORE_ADDR. It is assumed that the inferior's endianness is the
6691 same as the superior's. */
6692 union
6693 {
6694 CORE_ADDR core_addr;
6695 unsigned int ui;
6696 unsigned char uc;
6697 } addr;
6698
6699 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6700 if (ret == 0)
6701 {
6702 if (ptr_size == sizeof (CORE_ADDR))
6703 *ptr = addr.core_addr;
6704 else if (ptr_size == sizeof (unsigned int))
6705 *ptr = addr.ui;
6706 else
6707 gdb_assert_not_reached ("unhandled pointer size");
6708 }
6709 return ret;
6710 }
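/* To see why the union is needed, consider a big-endian 64-bit host
   reading a 4-byte inferior pointer: the four bytes land at the start
   of the union, which is the most-significant half of CORE_ADDR, so
   reading them back through addr.core_addr would yield a value shifted
   left by 32 bits; reading through the equally-sized addr.ui member
   yields the correct value.  */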
6711
6712 bool
6713 linux_process_target::supports_qxfer_libraries_svr4 ()
6714 {
6715 return true;
6716 }
6717
6718 struct link_map_offsets
6719 {
6720 /* Offset and size of r_debug.r_version. */
6721 int r_version_offset;
6722
6723 /* Offset and size of r_debug.r_map. */
6724 int r_map_offset;
6725
6726 /* Offset to l_addr field in struct link_map. */
6727 int l_addr_offset;
6728
6729 /* Offset to l_name field in struct link_map. */
6730 int l_name_offset;
6731
6732 /* Offset to l_ld field in struct link_map. */
6733 int l_ld_offset;
6734
6735 /* Offset to l_next field in struct link_map. */
6736 int l_next_offset;
6737
6738 /* Offset to l_prev field in struct link_map. */
6739 int l_prev_offset;
6740 };
6741
6742 /* Construct qXfer:libraries-svr4:read reply. */
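/* For example (an illustrative exchange with made-up addresses), GDB
   might request annex "start=0x7ffff7ffe190;prev=0x0;" and the reply
   built below would be of the shape

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fbd2e0"
              l_addr="0x7ffff7da0000" l_ld="0x7ffff7f9fbc0"/>
     </library-list-svr4>

   with one <library> element per link_map entry after the main
   executable.  */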
6743
6744 int
6745 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6746 unsigned char *readbuf,
6747 unsigned const char *writebuf,
6748 CORE_ADDR offset, int len)
6749 {
6750 struct process_info_private *const priv = current_process ()->priv;
6751 char filename[PATH_MAX];
6752 int pid, is_elf64;
6753
6754 static const struct link_map_offsets lmo_32bit_offsets =
6755 {
6756 0, /* r_version offset. */
6757 4, /* r_debug.r_map offset. */
6758 0, /* l_addr offset in link_map. */
6759 4, /* l_name offset in link_map. */
6760 8, /* l_ld offset in link_map. */
6761 12, /* l_next offset in link_map. */
6762 16 /* l_prev offset in link_map. */
6763 };
6764
6765 static const struct link_map_offsets lmo_64bit_offsets =
6766 {
6767 0, /* r_version offset. */
6768 8, /* r_debug.r_map offset. */
6769 0, /* l_addr offset in link_map. */
6770 8, /* l_name offset in link_map. */
6771 16, /* l_ld offset in link_map. */
6772 24, /* l_next offset in link_map. */
6773 32 /* l_prev offset in link_map. */
6774 };
6775 const struct link_map_offsets *lmo;
6776 unsigned int machine;
6777 int ptr_size;
6778 CORE_ADDR lm_addr = 0, lm_prev = 0;
6779 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6780 int header_done = 0;
6781
6782 if (writebuf != NULL)
6783 return -2;
6784 if (readbuf == NULL)
6785 return -1;
6786
6787 pid = lwpid_of (current_thread);
6788 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6789 is_elf64 = elf_64_file_p (filename, &machine);
6790 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6791 ptr_size = is_elf64 ? 8 : 4;
6792
6793 while (annex[0] != '\0')
6794 {
6795 const char *sep;
6796 CORE_ADDR *addrp;
6797 int name_len;
6798
6799 sep = strchr (annex, '=');
6800 if (sep == NULL)
6801 break;
6802
6803 name_len = sep - annex;
6804 if (name_len == 5 && startswith (annex, "start"))
6805 addrp = &lm_addr;
6806 else if (name_len == 4 && startswith (annex, "prev"))
6807 addrp = &lm_prev;
6808 else
6809 {
6810 annex = strchr (sep, ';');
6811 if (annex == NULL)
6812 break;
6813 annex++;
6814 continue;
6815 }
6816
6817 annex = decode_address_to_semicolon (addrp, sep + 1);
6818 }
6819
6820 if (lm_addr == 0)
6821 {
6822 int r_version = 0;
6823
6824 if (priv->r_debug == 0)
6825 priv->r_debug = get_r_debug (pid, is_elf64);
6826
6827 /* We failed to find DT_DEBUG. That situation will not change
6828 for this inferior, so do not retry it. Report it to GDB as
6829 E01; see GDB's solib-svr4.c for the reasons. */
6830 if (priv->r_debug == (CORE_ADDR) -1)
6831 return -1;
6832
6833 if (priv->r_debug != 0)
6834 {
6835 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6836 (unsigned char *) &r_version,
6837 sizeof (r_version)) != 0
6838 || r_version < 1)
6839 {
6840 warning ("unexpected r_debug version %d", r_version);
6841 }
6842 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6843 &lm_addr, ptr_size) != 0)
6844 {
6845 warning ("unable to read r_map from 0x%lx",
6846 (long) priv->r_debug + lmo->r_map_offset);
6847 }
6848 }
6849 }
6850
6851 std::string document = "<library-list-svr4 version=\"1.0\"";
6852
6853 while (lm_addr
6854 && read_one_ptr (lm_addr + lmo->l_name_offset,
6855 &l_name, ptr_size) == 0
6856 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6857 &l_addr, ptr_size) == 0
6858 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6859 &l_ld, ptr_size) == 0
6860 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6861 &l_prev, ptr_size) == 0
6862 && read_one_ptr (lm_addr + lmo->l_next_offset,
6863 &l_next, ptr_size) == 0)
6864 {
6865 unsigned char libname[PATH_MAX];
6866
6867 if (lm_prev != l_prev)
6868 {
6869 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6870 (long) lm_prev, (long) l_prev);
6871 break;
6872 }
6873
6874 /* Ignore the first entry even if it has a valid name, as the first
6875 entry corresponds to the main executable. The first entry should not
6876 be skipped if the dynamic loader was loaded late by a static executable
6877 (see the solib-svr4.c parameter ignore_first). But in that case the
6878 main executable has no PT_DYNAMIC present, and this function has
6879 already exited above due to a failed get_r_debug. */
6880 if (lm_prev == 0)
6881 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6882 else
6883 {
6884 /* Not checking for error because reading may stop before
6885 we've got PATH_MAX worth of characters. */
6886 libname[0] = '\0';
6887 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6888 libname[sizeof (libname) - 1] = '\0';
6889 if (libname[0] != '\0')
6890 {
6891 if (!header_done)
6892 {
6893 /* Terminate `<library-list-svr4'. */
6894 document += '>';
6895 header_done = 1;
6896 }
6897
6898 string_appendf (document, "<library name=\"");
6899 xml_escape_text_append (&document, (char *) libname);
6900 string_appendf (document, "\" lm=\"0x%lx\" "
6901 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6902 (unsigned long) lm_addr, (unsigned long) l_addr,
6903 (unsigned long) l_ld);
6904 }
6905 }
6906
6907 lm_prev = lm_addr;
6908 lm_addr = l_next;
6909 }
6910
6911 if (!header_done)
6912 {
6913 /* Empty list; terminate `<library-list-svr4'. */
6914 document += "/>";
6915 }
6916 else
6917 document += "</library-list-svr4>";
6918
6919 int document_len = document.length ();
6920 if (offset < document_len)
6921 document_len -= offset;
6922 else
6923 document_len = 0;
6924 if (len > document_len)
6925 len = document_len;
6926
6927 memcpy (readbuf, document.data () + offset, len);
6928
6929 return len;
6930 }
6931
6932 #ifdef HAVE_LINUX_BTRACE
6933
6934 btrace_target_info *
6935 linux_process_target::enable_btrace (ptid_t ptid,
6936 const btrace_config *conf)
6937 {
6938 return linux_enable_btrace (ptid, conf);
6939 }
6940
6941 /* See to_disable_btrace target method. */
6942
6943 int
6944 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6945 {
6946 enum btrace_error err;
6947
6948 err = linux_disable_btrace (tinfo);
6949 return (err == BTRACE_ERR_NONE ? 0 : -1);
6950 }
6951
6952 /* Encode an Intel Processor Trace configuration. */
6953
6954 static void
6955 linux_low_encode_pt_config (struct buffer *buffer,
6956 const struct btrace_data_pt_config *config)
6957 {
6958 buffer_grow_str (buffer, "<pt-config>\n");
6959
6960 switch (config->cpu.vendor)
6961 {
6962 case CV_INTEL:
6963 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6964 "model=\"%u\" stepping=\"%u\"/>\n",
6965 config->cpu.family, config->cpu.model,
6966 config->cpu.stepping);
6967 break;
6968
6969 default:
6970 break;
6971 }
6972
6973 buffer_grow_str (buffer, "</pt-config>\n");
6974 }
6975
6976 /* Encode a raw buffer. */
6977
6978 static void
6979 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6980 unsigned int size)
6981 {
6982 if (size == 0)
6983 return;
6984
6985 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6986 buffer_grow_str (buffer, "<raw>\n");
6987
6988 while (size-- > 0)
6989 {
6990 char elem[2];
6991
6992 elem[0] = tohex ((*data >> 4) & 0xf);
6993 elem[1] = tohex (*data++ & 0xf);
6994
6995 buffer_grow (buffer, elem, 2);
6996 }
6997
6998 buffer_grow_str (buffer, "</raw>\n");
6999 }
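/* E.g. a raw byte 0xa5 is emitted as the two characters "a5": tohex
   maps the high nibble 0xa to 'a' and the low nibble 0x5 to '5'.  */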
7000
7001 /* See to_read_btrace target method. */
7002
7003 int
7004 linux_process_target::read_btrace (btrace_target_info *tinfo,
7005 buffer *buffer,
7006 enum btrace_read_type type)
7007 {
7008 struct btrace_data btrace;
7009 enum btrace_error err;
7010
7011 err = linux_read_btrace (&btrace, tinfo, type);
7012 if (err != BTRACE_ERR_NONE)
7013 {
7014 if (err == BTRACE_ERR_OVERFLOW)
7015 buffer_grow_str0 (buffer, "E.Overflow.");
7016 else
7017 buffer_grow_str0 (buffer, "E.Generic Error.");
7018
7019 return -1;
7020 }
7021
7022 switch (btrace.format)
7023 {
7024 case BTRACE_FORMAT_NONE:
7025 buffer_grow_str0 (buffer, "E.No Trace.");
7026 return -1;
7027
7028 case BTRACE_FORMAT_BTS:
7029 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7030 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7031
7032 for (const btrace_block &block : *btrace.variant.bts.blocks)
7033 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7034 paddress (block.begin), paddress (block.end));
7035
7036 buffer_grow_str0 (buffer, "</btrace>\n");
7037 break;
7038
7039 case BTRACE_FORMAT_PT:
7040 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7041 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7042 buffer_grow_str (buffer, "<pt>\n");
7043
7044 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7045
7046 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7047 btrace.variant.pt.size);
7048
7049 buffer_grow_str (buffer, "</pt>\n");
7050 buffer_grow_str0 (buffer, "</btrace>\n");
7051 break;
7052
7053 default:
7054 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7055 return -1;
7056 }
7057
7058 return 0;
7059 }
7060
7061 /* See to_btrace_conf target method. */
7062
7063 int
7064 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7065 buffer *buffer)
7066 {
7067 const struct btrace_config *conf;
7068
7069 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7070 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7071
7072 conf = linux_btrace_conf (tinfo);
7073 if (conf != NULL)
7074 {
7075 switch (conf->format)
7076 {
7077 case BTRACE_FORMAT_NONE:
7078 break;
7079
7080 case BTRACE_FORMAT_BTS:
7081 buffer_xml_printf (buffer, "<bts");
7082 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7083 buffer_xml_printf (buffer, " />\n");
7084 break;
7085
7086 case BTRACE_FORMAT_PT:
7087 buffer_xml_printf (buffer, "<pt");
7088 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7089 buffer_xml_printf (buffer, "/>\n");
7090 break;
7091 }
7092 }
7093
7094 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7095 return 0;
7096 }
7097 #endif /* HAVE_LINUX_BTRACE */
7098
7099 /* See nat/linux-nat.h. */
7100
7101 ptid_t
7102 current_lwp_ptid (void)
7103 {
7104 return ptid_of (current_thread);
7105 }
7106
7107 const char *
7108 linux_process_target::thread_name (ptid_t thread)
7109 {
7110 return linux_proc_tid_get_name (thread);
7111 }
7112
7113 #if USE_THREAD_DB
7114 bool
7115 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7116 int *handle_len)
7117 {
7118 return thread_db_thread_handle (ptid, handle, handle_len);
7119 }
7120 #endif
7121
7122 thread_info *
7123 linux_process_target::thread_pending_parent (thread_info *thread)
7124 {
7125 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7126
7127 if (parent == nullptr)
7128 return nullptr;
7129
7130 return get_lwp_thread (parent);
7131 }
7132
7133 thread_info *
7134 linux_process_target::thread_pending_child (thread_info *thread)
7135 {
7136 lwp_info *child = get_thread_lwp (thread)->pending_child ();
7137
7138 if (child == nullptr)
7139 return nullptr;
7140
7141 return get_lwp_thread (child);
7142 }
7143
7144 /* Default implementation of linux_target_ops method "set_pc" for
7145 32-bit pc register which is literally named "pc". */
7146
7147 void
7148 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7149 {
7150 uint32_t newpc = pc;
7151
7152 supply_register_by_name (regcache, "pc", &newpc);
7153 }
7154
7155 /* Default implementation of linux_target_ops method "get_pc" for
7156 32-bit pc register which is literally named "pc". */
7157
7158 CORE_ADDR
7159 linux_get_pc_32bit (struct regcache *regcache)
7160 {
7161 uint32_t pc;
7162
7163 collect_register_by_name (regcache, "pc", &pc);
7164 if (debug_threads)
7165 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7166 return pc;
7167 }
7168
7169 /* Default implementation of linux_target_ops method "set_pc" for
7170 64-bit pc register which is literally named "pc". */
7171
7172 void
7173 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7174 {
7175 uint64_t newpc = pc;
7176
7177 supply_register_by_name (regcache, "pc", &newpc);
7178 }
7179
7180 /* Default implementation of linux_target_ops method "get_pc" for
7181 64-bit pc register which is literally named "pc". */
7182
7183 CORE_ADDR
7184 linux_get_pc_64bit (struct regcache *regcache)
7185 {
7186 uint64_t pc;
7187
7188 collect_register_by_name (regcache, "pc", &pc);
7189 if (debug_threads)
7190 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7191 return pc;
7192 }
7193
7194 /* See linux-low.h. */
7195
7196 int
7197 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7198 {
7199 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7200 int offset = 0;
7201
7202 gdb_assert (wordsize == 4 || wordsize == 8);
7203
7204 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7205 {
7206 if (wordsize == 4)
7207 {
7208 uint32_t *data_p = (uint32_t *) data;
7209 if (data_p[0] == match)
7210 {
7211 *valp = data_p[1];
7212 return 1;
7213 }
7214 }
7215 else
7216 {
7217 uint64_t *data_p = (uint64_t *) data;
7218 if (data_p[0] == match)
7219 {
7220 *valp = data_p[1];
7221 return 1;
7222 }
7223 }
7224
7225 offset += 2 * wordsize;
7226 }
7227
7228 return 0;
7229 }
7230
7231 /* See linux-low.h. */
7232
7233 CORE_ADDR
7234 linux_get_hwcap (int wordsize)
7235 {
7236 CORE_ADDR hwcap = 0;
7237 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7238 return hwcap;
7239 }
7240
7241 /* See linux-low.h. */
7242
7243 CORE_ADDR
7244 linux_get_hwcap2 (int wordsize)
7245 {
7246 CORE_ADDR hwcap2 = 0;
7247 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7248 return hwcap2;
7249 }
7250
7251 #ifdef HAVE_LINUX_REGSETS
7252 void
7253 initialize_regsets_info (struct regsets_info *info)
7254 {
7255 for (info->num_regsets = 0;
7256 info->regsets[info->num_regsets].size >= 0;
7257 info->num_regsets++)
7258 ;
7259 }
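/* Each architecture's regsets table is expected to be terminated by a
   sentinel entry whose size is negative (the per-arch files use
   NULL_REGSET for this), so a table of N real regsets followed by the
   sentinel yields num_regsets == N.  */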
7260 #endif
7261
7262 void
7263 initialize_low (void)
7264 {
7265 struct sigaction sigchld_action;
7266
7267 memset (&sigchld_action, 0, sizeof (sigchld_action));
7268 set_target_ops (the_linux_target);
7269
7270 linux_ptrace_init_warnings ();
7271 linux_proc_init_warnings ();
7272
7273 sigchld_action.sa_handler = sigchld_handler;
7274 sigemptyset (&sigchld_action.sa_mask);
7275 sigchld_action.sa_flags = SA_RESTART;
7276 sigaction (SIGCHLD, &sigchld_action, NULL);
7277
7278 initialize_low_arch ();
7279
7280 linux_check_ptrace_features ();
7281 }