Filter invalid encodings from Linux thread names
[binutils-gdb.git] / gdbserver / linux-low.cc
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2023 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/event-loop.h"
25 #include "gdbsupport/event-pipe.h"
26 #include "gdbsupport/rsp-low.h"
27 #include "gdbsupport/signals-state-save-restore.h"
28 #include "nat/linux-nat.h"
29 #include "nat/linux-waitpid.h"
30 #include "gdbsupport/gdb_wait.h"
31 #include "nat/gdb_ptrace.h"
32 #include "nat/linux-ptrace.h"
33 #include "nat/linux-procfs.h"
34 #include "nat/linux-personality.h"
35 #include <signal.h>
36 #include <sys/ioctl.h>
37 #include <fcntl.h>
38 #include <unistd.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include <sys/stat.h>
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #include <langinfo.h>
48 #include <iconv.h>
49 #include "gdbsupport/filestuff.h"
50 #include "gdbsupport/gdb-safe-ctype.h"
51 #include "tracepoint.h"
52 #include <inttypes.h>
53 #include "gdbsupport/common-inferior.h"
54 #include "nat/fork-inferior.h"
55 #include "gdbsupport/environ.h"
56 #include "gdbsupport/gdb-sigmask.h"
57 #include "gdbsupport/scoped_restore.h"
58 #ifndef ELFMAG0
59 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
60 then ELFMAG0 will have been defined. If it didn't get included by
61 gdb_proc_service.h then including it will likely introduce a duplicate
62 definition of elf_fpregset_t. */
63 #include <elf.h>
64 #endif
65 #include "nat/linux-namespaces.h"
66
67 #ifndef O_LARGEFILE
68 #define O_LARGEFILE 0
69 #endif
70
71 #ifndef AT_HWCAP2
72 #define AT_HWCAP2 26
73 #endif
74
75 /* Some targets did not define these ptrace constants from the start,
76 so gdbserver defines them locally here. In the future, these may
77 be removed after they are added to asm/ptrace.h. */
78 #if !(defined(PT_TEXT_ADDR) \
79 || defined(PT_DATA_ADDR) \
80 || defined(PT_TEXT_END_ADDR))
81 #if defined(__mcoldfire__)
82 /* These are still undefined in 3.10 kernels. */
83 #define PT_TEXT_ADDR 49*4
84 #define PT_DATA_ADDR 50*4
85 #define PT_TEXT_END_ADDR 51*4
86 /* These are still undefined in 3.10 kernels. */
87 #elif defined(__TMS320C6X__)
88 #define PT_TEXT_ADDR (0x10000*4)
89 #define PT_DATA_ADDR (0x10004*4)
90 #define PT_TEXT_END_ADDR (0x10008*4)
91 #endif
92 #endif
93
94 #if (defined(__UCLIBC__) \
95 && defined(HAS_NOMMU) \
96 && defined(PT_TEXT_ADDR) \
97 && defined(PT_DATA_ADDR) \
98 && defined(PT_TEXT_END_ADDR))
99 #define SUPPORTS_READ_OFFSETS
100 #endif
101
102 #ifdef HAVE_LINUX_BTRACE
103 # include "nat/linux-btrace.h"
104 # include "gdbsupport/btrace-common.h"
105 #endif
106
107 #ifndef HAVE_ELF32_AUXV_T
108 /* Copied from glibc's elf.h. */
109 typedef struct
110 {
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119 } Elf32_auxv_t;
120 #endif
121
122 #ifndef HAVE_ELF64_AUXV_T
123 /* Copied from glibc's elf.h. */
124 typedef struct
125 {
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134 } Elf64_auxv_t;
135 #endif
136
137 /* Does the current host support PTRACE_GETREGSET? */
138 int have_ptrace_getregset = -1;
139
140 /* Return TRUE if THREAD is the leader thread of the process. */
141
142 static bool
143 is_leader (thread_info *thread)
144 {
145 ptid_t ptid = ptid_of (thread);
146 return ptid.pid () == ptid.lwp ();
147 }
148
149 /* Return true if we should report thread exit events to GDB, for
150 THR. */
151
152 static bool
153 report_exit_events_for (thread_info *thr)
154 {
155 client_state &cs = get_client_state ();
156
157 return (cs.report_thread_events
158 || (thr->thread_options & GDB_THREAD_OPTION_EXIT) != 0);
159 }
160
161 /* LWP accessors. */
162
163 /* See nat/linux-nat.h. */
164
165 ptid_t
166 ptid_of_lwp (struct lwp_info *lwp)
167 {
168 return ptid_of (get_lwp_thread (lwp));
169 }
170
171 /* See nat/linux-nat.h. */
172
173 void
174 lwp_set_arch_private_info (struct lwp_info *lwp,
175 struct arch_lwp_info *info)
176 {
177 lwp->arch_private = info;
178 }
179
180 /* See nat/linux-nat.h. */
181
182 struct arch_lwp_info *
183 lwp_arch_private_info (struct lwp_info *lwp)
184 {
185 return lwp->arch_private;
186 }
187
188 /* See nat/linux-nat.h. */
189
190 int
191 lwp_is_stopped (struct lwp_info *lwp)
192 {
193 return lwp->stopped;
194 }
195
196 /* See nat/linux-nat.h. */
197
198 enum target_stop_reason
199 lwp_stop_reason (struct lwp_info *lwp)
200 {
201 return lwp->stop_reason;
202 }
203
204 /* See nat/linux-nat.h. */
205
206 int
207 lwp_is_stepping (struct lwp_info *lwp)
208 {
209 return lwp->stepping;
210 }
211
212 /* A list of all unknown processes which receive stop signals. Some
213 other process will presumably claim each of these as forked
214 children momentarily. */
215
216 struct simple_pid_list
217 {
218 /* The process ID. */
219 int pid;
220
221 /* The status as reported by waitpid. */
222 int status;
223
224 /* Next in chain. */
225 struct simple_pid_list *next;
226 };
227 static struct simple_pid_list *stopped_pids;
228
229 /* Trivial list manipulation functions to keep track of a list of new
230 stopped processes. */
231
232 static void
233 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
234 {
235 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
236
237 new_pid->pid = pid;
238 new_pid->status = status;
239 new_pid->next = *listp;
240 *listp = new_pid;
241 }
242
243 static int
244 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
245 {
246 struct simple_pid_list **p;
247
248 for (p = listp; *p != NULL; p = &(*p)->next)
249 if ((*p)->pid == pid)
250 {
251 struct simple_pid_list *next = (*p)->next;
252
253 *statusp = (*p)->status;
254 xfree (*p);
255 *p = next;
256 return 1;
257 }
258 return 0;
259 }
260
261 enum stopping_threads_kind
262 {
263 /* Not stopping threads presently. */
264 NOT_STOPPING_THREADS,
265
266 /* Stopping threads. */
267 STOPPING_THREADS,
268
269 /* Stopping and suspending threads. */
270 STOPPING_AND_SUSPENDING_THREADS
271 };
272
273 /* This is set while stop_all_lwps is in effect. */
274 static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
275
276 /* FIXME make into a target method? */
277 int using_threads = 1;
278
279 /* True if we're presently stabilizing threads (moving them out of
280 jump pads). */
281 static int stabilizing_threads;
282
283 static void unsuspend_all_lwps (struct lwp_info *except);
284 static void mark_lwp_dead (struct lwp_info *lwp, int wstat,
285 bool thread_event);
286 static int lwp_is_marked_dead (struct lwp_info *lwp);
287 static int kill_lwp (unsigned long lwpid, int signo);
288 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
289 static int linux_low_ptrace_options (int attached);
290 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
291
292 /* When the event-loop is doing a step-over, this points at the thread
293 being stepped. */
294 static ptid_t step_over_bkpt;
295
296 bool
297 linux_process_target::low_supports_breakpoints ()
298 {
299 return false;
300 }
301
302 CORE_ADDR
303 linux_process_target::low_get_pc (regcache *regcache)
304 {
305 return 0;
306 }
307
308 void
309 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
310 {
311 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
312 }
313
314 std::vector<CORE_ADDR>
315 linux_process_target::low_get_next_pcs (regcache *regcache)
316 {
317 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
318 "implemented");
319 }
320
321 int
322 linux_process_target::low_decr_pc_after_break ()
323 {
324 return 0;
325 }
326
327 /* True if LWP is stopped in its stepping range. */
328
329 static int
330 lwp_in_step_range (struct lwp_info *lwp)
331 {
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 }
336
337 /* The event pipe registered as a waitable file in the event loop. */
338 static event_pipe linux_event_pipe;
339
340 /* True if we're currently in async mode. */
341 #define target_is_async_p() (linux_event_pipe.is_open ())
342
343 static void send_sigstop (struct lwp_info *lwp);
344
345 /* Return non-zero if HEADER is a 64-bit ELF file. */
346
347 static int
348 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
349 {
350 if (header->e_ident[EI_MAG0] == ELFMAG0
351 && header->e_ident[EI_MAG1] == ELFMAG1
352 && header->e_ident[EI_MAG2] == ELFMAG2
353 && header->e_ident[EI_MAG3] == ELFMAG3)
354 {
355 *machine = header->e_machine;
356 return header->e_ident[EI_CLASS] == ELFCLASS64;
357
358 }
359 *machine = EM_NONE;
360 return -1;
361 }
362
363 /* Return non-zero if FILE is a 64-bit ELF file,
364 zero if the file is not a 64-bit ELF file,
365 and -1 if the file is not accessible or doesn't exist. */
366
367 static int
368 elf_64_file_p (const char *file, unsigned int *machine)
369 {
370 Elf64_Ehdr header;
371 int fd;
372
373 fd = open (file, O_RDONLY);
374 if (fd < 0)
375 return -1;
376
377 if (read (fd, &header, sizeof (header)) != sizeof (header))
378 {
379 close (fd);
380 return 0;
381 }
382 close (fd);
383
384 return elf_64_header_p (&header, machine);
385 }
386
387 /* Accepts an integer PID; Returns true if the executable PID is
388 running is a 64-bit ELF file.. */
389
390 int
391 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
392 {
393 char file[PATH_MAX];
394
395 sprintf (file, "/proc/%d/exe", pid);
396 return elf_64_file_p (file, machine);
397 }
398
399 void
400 linux_process_target::delete_lwp (lwp_info *lwp)
401 {
402 struct thread_info *thr = get_lwp_thread (lwp);
403
404 threads_debug_printf ("deleting %ld", lwpid_of (thr));
405
406 remove_thread (thr);
407
408 low_delete_thread (lwp->arch_private);
409
410 delete lwp;
411 }
412
413 void
414 linux_process_target::low_delete_thread (arch_lwp_info *info)
415 {
416 /* Default implementation should be overridden if architecture-specific
417 info is being used. */
418 gdb_assert (info == nullptr);
419 }
420
421 /* Open the /proc/PID/mem file for PROC. */
422
423 static void
424 open_proc_mem_file (process_info *proc)
425 {
426 gdb_assert (proc->priv->mem_fd == -1);
427
428 char filename[64];
429 xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
430
431 proc->priv->mem_fd
432 = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
433 }
434
435 process_info *
436 linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
437 {
438 struct process_info *proc;
439
440 proc = add_process (pid, attached);
441 proc->priv = XCNEW (struct process_info_private);
442
443 proc->priv->arch_private = low_new_process ();
444 proc->priv->mem_fd = -1;
445
446 return proc;
447 }
448
449
450 process_info *
451 linux_process_target::add_linux_process (int pid, int attached)
452 {
453 process_info *proc = add_linux_process_no_mem_file (pid, attached);
454 open_proc_mem_file (proc);
455 return proc;
456 }
457
458 void
459 linux_process_target::remove_linux_process (process_info *proc)
460 {
461 if (proc->priv->mem_fd >= 0)
462 close (proc->priv->mem_fd);
463
464 this->low_delete_process (proc->priv->arch_private);
465
466 xfree (proc->priv);
467 proc->priv = nullptr;
468
469 remove_process (proc);
470 }
471
472 arch_process_info *
473 linux_process_target::low_new_process ()
474 {
475 return nullptr;
476 }
477
478 void
479 linux_process_target::low_delete_process (arch_process_info *info)
480 {
481 /* Default implementation must be overridden if architecture-specific
482 info exists. */
483 gdb_assert (info == nullptr);
484 }
485
486 void
487 linux_process_target::low_new_fork (process_info *parent, process_info *child)
488 {
489 /* Nop. */
490 }
491
492 void
493 linux_process_target::arch_setup_thread (thread_info *thread)
494 {
495 scoped_restore_current_thread restore_thread;
496 switch_to_thread (thread);
497
498 low_arch_setup ();
499 }
500
501 int
502 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
503 int wstat)
504 {
505 client_state &cs = get_client_state ();
506 struct lwp_info *event_lwp = *orig_event_lwp;
507 int event = linux_ptrace_get_extended_event (wstat);
508 struct thread_info *event_thr = get_lwp_thread (event_lwp);
509
510 gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
511
512 /* All extended events we currently use are mid-syscall. Only
513 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
514 you have to be using PTRACE_SEIZE to get that. */
515 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
516
517 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
518 || (event == PTRACE_EVENT_CLONE))
519 {
520 unsigned long new_pid;
521 int ret, status;
522
523 /* Get the pid of the new lwp. */
524 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
525 &new_pid);
526
527 /* If we haven't already seen the new PID stop, wait for it now. */
528 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
529 {
530 /* The new child has a pending SIGSTOP. We can't affect it until it
531 hits the SIGSTOP, but we're already attached. */
532
533 ret = my_waitpid (new_pid, &status, __WALL);
534
535 if (ret == -1)
536 perror_with_name ("waiting for new child");
537 else if (ret != new_pid)
538 warning ("wait returned unexpected PID %d", ret);
539 else if (!WIFSTOPPED (status))
540 warning ("wait returned unexpected status 0x%x", status);
541 }
542
543 if (debug_threads)
544 {
545 debug_printf ("HEW: Got %s event from LWP %ld, new child is %ld\n",
546 (event == PTRACE_EVENT_FORK ? "fork"
547 : event == PTRACE_EVENT_VFORK ? "vfork"
548 : event == PTRACE_EVENT_CLONE ? "clone"
549 : "???"),
550 ptid_of (event_thr).lwp (),
551 new_pid);
552 }
553
554 ptid_t child_ptid = (event != PTRACE_EVENT_CLONE
555 ? ptid_t (new_pid, new_pid)
556 : ptid_t (ptid_of (event_thr).pid (), new_pid));
557
558 lwp_info *child_lwp = add_lwp (child_ptid);
559 gdb_assert (child_lwp != NULL);
560 child_lwp->stopped = 1;
561 if (event != PTRACE_EVENT_CLONE)
562 child_lwp->must_set_ptrace_flags = 1;
563 child_lwp->status_pending_p = 0;
564
565 thread_info *child_thr = get_lwp_thread (child_lwp);
566
567 /* If we're suspending all threads, leave this one suspended
568 too. If the fork/clone parent is stepping over a breakpoint,
569 all other threads have been suspended already. Leave the
570 child suspended too. */
571 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
572 || event_lwp->bp_reinsert != 0)
573 {
574 threads_debug_printf ("leaving child suspended");
575 child_lwp->suspended = 1;
576 }
577
578 if (event_lwp->bp_reinsert != 0
579 && supports_software_single_step ()
580 && event == PTRACE_EVENT_VFORK)
581 {
582 /* If we leave single-step breakpoints there, child will
583 hit it, so uninsert single-step breakpoints from parent
584 (and child). Once vfork child is done, reinsert
585 them back to parent. */
586 uninsert_single_step_breakpoints (event_thr);
587 }
588
589 if (event != PTRACE_EVENT_CLONE)
590 {
591 /* Add the new process to the tables and clone the breakpoint
592 lists of the parent. We need to do this even if the new process
593 will be detached, since we will need the process object and the
594 breakpoints to remove any breakpoints from memory when we
595 detach, and the client side will access registers. */
596 process_info *child_proc = add_linux_process (new_pid, 0);
597 gdb_assert (child_proc != NULL);
598
599 process_info *parent_proc = get_thread_process (event_thr);
600 child_proc->attached = parent_proc->attached;
601
602 clone_all_breakpoints (child_thr, event_thr);
603
604 target_desc_up tdesc = allocate_target_description ();
605 copy_target_description (tdesc.get (), parent_proc->tdesc);
606 child_proc->tdesc = tdesc.release ();
607
608 /* Clone arch-specific process data. */
609 low_new_fork (parent_proc, child_proc);
610 }
611
612 /* Save fork/clone info in the parent thread. */
613 if (event == PTRACE_EVENT_FORK)
614 event_lwp->waitstatus.set_forked (child_ptid);
615 else if (event == PTRACE_EVENT_VFORK)
616 event_lwp->waitstatus.set_vforked (child_ptid);
617 else if (event == PTRACE_EVENT_CLONE
618 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
619 event_lwp->waitstatus.set_thread_cloned (child_ptid);
620
621 if (event != PTRACE_EVENT_CLONE
622 || (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) != 0)
623 {
624 /* The status_pending field contains bits denoting the
625 extended event, so when the pending event is handled, the
626 handler will look at lwp->waitstatus. */
627 event_lwp->status_pending_p = 1;
628 event_lwp->status_pending = wstat;
629
630 /* Link the threads until the parent's event is passed on to
631 GDB. */
632 event_lwp->relative = child_lwp;
633 child_lwp->relative = event_lwp;
634 }
635
636 /* If the parent thread is doing step-over with single-step
637 breakpoints, the list of single-step breakpoints are cloned
638 from the parent's. Remove them from the child process.
639 In case of vfork, we'll reinsert them back once vforked
640 child is done. */
641 if (event_lwp->bp_reinsert != 0
642 && supports_software_single_step ())
643 {
644 /* The child process is forked and stopped, so it is safe
645 to access its memory without stopping all other threads
646 from other processes. */
647 delete_single_step_breakpoints (child_thr);
648
649 gdb_assert (has_single_step_breakpoints (event_thr));
650 gdb_assert (!has_single_step_breakpoints (child_thr));
651 }
652
653 /* Normally we will get the pending SIGSTOP. But in some cases
654 we might get another signal delivered to the group first.
655 If we do get another signal, be sure not to lose it. */
656 if (WSTOPSIG (status) != SIGSTOP)
657 {
658 child_lwp->stop_expected = 1;
659 child_lwp->status_pending_p = 1;
660 child_lwp->status_pending = status;
661 }
662 else if (event == PTRACE_EVENT_CLONE && cs.report_thread_events)
663 {
664 child_lwp->waitstatus.set_thread_created ();
665 child_lwp->status_pending_p = 1;
666 child_lwp->status_pending = status;
667 }
668
669 if (event == PTRACE_EVENT_CLONE)
670 {
671 #ifdef USE_THREAD_DB
672 thread_db_notice_clone (event_thr, child_ptid);
673 #endif
674 }
675
676 if (event == PTRACE_EVENT_CLONE
677 && (event_thr->thread_options & GDB_THREAD_OPTION_CLONE) == 0)
678 {
679 threads_debug_printf
680 ("not reporting clone event from LWP %ld, new child is %ld\n",
681 ptid_of (event_thr).lwp (),
682 new_pid);
683 return 1;
684 }
685
686 /* Leave the child stopped until GDB processes the parent
687 event. */
688 child_thr->last_resume_kind = resume_stop;
689 child_thr->last_status.set_stopped (GDB_SIGNAL_0);
690
691 /* Report the event. */
692 threads_debug_printf
693 ("reporting %s event from LWP %ld, new child is %ld\n",
694 (event == PTRACE_EVENT_FORK ? "fork"
695 : event == PTRACE_EVENT_VFORK ? "vfork"
696 : event == PTRACE_EVENT_CLONE ? "clone"
697 : "???"),
698 ptid_of (event_thr).lwp (),
699 new_pid);
700 return 0;
701 }
702 else if (event == PTRACE_EVENT_VFORK_DONE)
703 {
704 event_lwp->waitstatus.set_vfork_done ();
705
706 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
707 {
708 reinsert_single_step_breakpoints (event_thr);
709
710 gdb_assert (has_single_step_breakpoints (event_thr));
711 }
712
713 /* Report the event. */
714 return 0;
715 }
716 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
717 {
718 struct process_info *proc;
719 std::vector<int> syscalls_to_catch;
720 ptid_t event_ptid;
721 pid_t event_pid;
722
723 threads_debug_printf ("Got exec event from LWP %ld",
724 lwpid_of (event_thr));
725
726 /* Get the event ptid. */
727 event_ptid = ptid_of (event_thr);
728 event_pid = event_ptid.pid ();
729
730 /* Save the syscall list from the execing process. */
731 proc = get_thread_process (event_thr);
732 syscalls_to_catch = std::move (proc->syscalls_to_catch);
733
734 /* Delete the execing process and all its threads. */
735 mourn (proc);
736 switch_to_thread (nullptr);
737
738 /* Create a new process/lwp/thread. */
739 proc = add_linux_process (event_pid, 0);
740 event_lwp = add_lwp (event_ptid);
741 event_thr = get_lwp_thread (event_lwp);
742 gdb_assert (current_thread == event_thr);
743 arch_setup_thread (event_thr);
744
745 /* Set the event status. */
746 event_lwp->waitstatus.set_execd
747 (make_unique_xstrdup
748 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
749
750 /* Mark the exec status as pending. */
751 event_lwp->stopped = 1;
752 event_lwp->status_pending_p = 1;
753 event_lwp->status_pending = wstat;
754 event_thr->last_resume_kind = resume_continue;
755 event_thr->last_status.set_ignore ();
756
757 /* Update syscall state in the new lwp, effectively mid-syscall too. */
758 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
759
760 /* Restore the list to catch. Don't rely on the client, which is free
761 to avoid sending a new list when the architecture doesn't change.
762 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
763 proc->syscalls_to_catch = std::move (syscalls_to_catch);
764
765 /* Report the event. */
766 *orig_event_lwp = event_lwp;
767 return 0;
768 }
769
770 internal_error (_("unknown ptrace event %d"), event);
771 }
772
773 CORE_ADDR
774 linux_process_target::get_pc (lwp_info *lwp)
775 {
776 process_info *proc = get_thread_process (get_lwp_thread (lwp));
777 gdb_assert (!proc->starting_up);
778
779 if (!low_supports_breakpoints ())
780 return 0;
781
782 scoped_restore_current_thread restore_thread;
783 switch_to_thread (get_lwp_thread (lwp));
784
785 struct regcache *regcache = get_thread_regcache (current_thread, 1);
786 CORE_ADDR pc = low_get_pc (regcache);
787
788 threads_debug_printf ("pc is 0x%lx", (long) pc);
789
790 return pc;
791 }
792
793 void
794 linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
795 {
796 struct regcache *regcache;
797
798 scoped_restore_current_thread restore_thread;
799 switch_to_thread (get_lwp_thread (lwp));
800
801 regcache = get_thread_regcache (current_thread, 1);
802 low_get_syscall_trapinfo (regcache, sysno);
803
804 threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
805 }
806
807 void
808 linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
809 {
810 /* By default, report an unknown system call number. */
811 *sysno = UNKNOWN_SYSCALL;
812 }
813
814 bool
815 linux_process_target::save_stop_reason (lwp_info *lwp)
816 {
817 CORE_ADDR pc;
818 CORE_ADDR sw_breakpoint_pc;
819 #if USE_SIGTRAP_SIGINFO
820 siginfo_t siginfo;
821 #endif
822
823 if (!low_supports_breakpoints ())
824 return false;
825
826 process_info *proc = get_thread_process (get_lwp_thread (lwp));
827 if (proc->starting_up)
828 {
829 /* Claim we have the stop PC so that the caller doesn't try to
830 fetch it itself. */
831 return true;
832 }
833
834 pc = get_pc (lwp);
835 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
836
837 /* breakpoint_at reads from the current thread. */
838 scoped_restore_current_thread restore_thread;
839 switch_to_thread (get_lwp_thread (lwp));
840
841 #if USE_SIGTRAP_SIGINFO
842 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
843 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
844 {
845 if (siginfo.si_signo == SIGTRAP)
846 {
847 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
848 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
849 {
850 /* The si_code is ambiguous on this arch -- check debug
851 registers. */
852 if (!check_stopped_by_watchpoint (lwp))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
854 }
855 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
856 {
857 /* If we determine the LWP stopped for a SW breakpoint,
858 trust it. Particularly don't check watchpoint
859 registers, because at least on s390, we'd find
860 stopped-by-watchpoint as long as there's a watchpoint
861 set. */
862 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
863 }
864 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
865 {
866 /* This can indicate either a hardware breakpoint or
867 hardware watchpoint. Check debug registers. */
868 if (!check_stopped_by_watchpoint (lwp))
869 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
870 }
871 else if (siginfo.si_code == TRAP_TRACE)
872 {
873 /* We may have single stepped an instruction that
874 triggered a watchpoint. In that case, on some
875 architectures (such as x86), instead of TRAP_HWBKPT,
876 si_code indicates TRAP_TRACE, and we need to check
877 the debug registers separately. */
878 if (!check_stopped_by_watchpoint (lwp))
879 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
880 }
881 }
882 }
883 #else
884 /* We may have just stepped a breakpoint instruction. E.g., in
885 non-stop mode, GDB first tells the thread A to step a range, and
886 then the user inserts a breakpoint inside the range. In that
887 case we need to report the breakpoint PC. */
888 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
889 && low_breakpoint_at (sw_breakpoint_pc))
890 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
891
892 if (hardware_breakpoint_inserted_here (pc))
893 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
894
895 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
896 check_stopped_by_watchpoint (lwp);
897 #endif
898
899 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
900 {
901 threads_debug_printf
902 ("%s stopped by software breakpoint",
903 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
904
905 /* Back up the PC if necessary. */
906 if (pc != sw_breakpoint_pc)
907 {
908 struct regcache *regcache
909 = get_thread_regcache (current_thread, 1);
910 low_set_pc (regcache, sw_breakpoint_pc);
911 }
912
913 /* Update this so we record the correct stop PC below. */
914 pc = sw_breakpoint_pc;
915 }
916 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
917 threads_debug_printf
918 ("%s stopped by hardware breakpoint",
919 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
920 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
921 threads_debug_printf
922 ("%s stopped by hardware watchpoint",
923 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
924 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
925 threads_debug_printf
926 ("%s stopped by trace",
927 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
928
929 lwp->stop_pc = pc;
930 return true;
931 }
932
933 lwp_info *
934 linux_process_target::add_lwp (ptid_t ptid)
935 {
936 lwp_info *lwp = new lwp_info;
937
938 lwp->thread = add_thread (ptid, lwp);
939
940 low_new_thread (lwp);
941
942 return lwp;
943 }
944
945 void
946 linux_process_target::low_new_thread (lwp_info *info)
947 {
948 /* Nop. */
949 }
950
951 /* Callback to be used when calling fork_inferior, responsible for
952 actually initiating the tracing of the inferior. */
953
954 static void
955 linux_ptrace_fun ()
956 {
957 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
958 (PTRACE_TYPE_ARG4) 0) < 0)
959 trace_start_error_with_name ("ptrace");
960
961 if (setpgid (0, 0) < 0)
962 trace_start_error_with_name ("setpgid");
963
964 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
965 stdout to stderr so that inferior i/o doesn't corrupt the connection.
966 Also, redirect stdin to /dev/null. */
967 if (remote_connection_is_stdio ())
968 {
969 if (close (0) < 0)
970 trace_start_error_with_name ("close");
971 if (open ("/dev/null", O_RDONLY) < 0)
972 trace_start_error_with_name ("open");
973 if (dup2 (2, 1) < 0)
974 trace_start_error_with_name ("dup2");
975 if (write (2, "stdin/stdout redirected\n",
976 sizeof ("stdin/stdout redirected\n") - 1) < 0)
977 {
978 /* Errors ignored. */;
979 }
980 }
981 }
982
983 /* Start an inferior process and returns its pid.
984 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
985 are its arguments. */
986
987 int
988 linux_process_target::create_inferior (const char *program,
989 const std::vector<char *> &program_args)
990 {
991 client_state &cs = get_client_state ();
992 struct lwp_info *new_lwp;
993 int pid;
994 ptid_t ptid;
995
996 {
997 maybe_disable_address_space_randomization restore_personality
998 (cs.disable_randomization);
999 std::string str_program_args = construct_inferior_arguments (program_args);
1000
1001 pid = fork_inferior (program,
1002 str_program_args.c_str (),
1003 get_environ ()->envp (), linux_ptrace_fun,
1004 NULL, NULL, NULL, NULL);
1005 }
1006
1007 /* When spawning a new process, we can't open the mem file yet. We
1008 still have to nurse the process through the shell, and that execs
1009 a couple times. The address space a /proc/PID/mem file is
1010 accessing is destroyed on exec. */
1011 process_info *proc = add_linux_process_no_mem_file (pid, 0);
1012
1013 ptid = ptid_t (pid, pid);
1014 new_lwp = add_lwp (ptid);
1015 new_lwp->must_set_ptrace_flags = 1;
1016
1017 post_fork_inferior (pid, program);
1018
1019 /* PROC is now past the shell running the program we want, so we can
1020 open the /proc/PID/mem file. */
1021 open_proc_mem_file (proc);
1022
1023 return pid;
1024 }
1025
1026 /* Implement the post_create_inferior target_ops method. */
1027
1028 void
1029 linux_process_target::post_create_inferior ()
1030 {
1031 struct lwp_info *lwp = get_thread_lwp (current_thread);
1032
1033 low_arch_setup ();
1034
1035 if (lwp->must_set_ptrace_flags)
1036 {
1037 struct process_info *proc = current_process ();
1038 int options = linux_low_ptrace_options (proc->attached);
1039
1040 linux_enable_event_reporting (lwpid_of (current_thread), options);
1041 lwp->must_set_ptrace_flags = 0;
1042 }
1043 }
1044
1045 int
1046 linux_process_target::attach_lwp (ptid_t ptid)
1047 {
1048 struct lwp_info *new_lwp;
1049 int lwpid = ptid.lwp ();
1050
1051 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1052 != 0)
1053 return errno;
1054
1055 new_lwp = add_lwp (ptid);
1056
1057 /* We need to wait for SIGSTOP before being able to make the next
1058 ptrace call on this LWP. */
1059 new_lwp->must_set_ptrace_flags = 1;
1060
1061 if (linux_proc_pid_is_stopped (lwpid))
1062 {
1063 threads_debug_printf ("Attached to a stopped process");
1064
1065 /* The process is definitely stopped. It is in a job control
1066 stop, unless the kernel predates the TASK_STOPPED /
1067 TASK_TRACED distinction, in which case it might be in a
1068 ptrace stop. Make sure it is in a ptrace stop; from there we
1069 can kill it, signal it, et cetera.
1070
1071 First make sure there is a pending SIGSTOP. Since we are
1072 already attached, the process can not transition from stopped
1073 to running without a PTRACE_CONT; so we know this signal will
1074 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1075 probably already in the queue (unless this kernel is old
1076 enough to use TASK_STOPPED for ptrace stops); but since
1077 SIGSTOP is not an RT signal, it can only be queued once. */
1078 kill_lwp (lwpid, SIGSTOP);
1079
1080 /* Finally, resume the stopped process. This will deliver the
1081 SIGSTOP (or a higher priority signal, just like normal
1082 PTRACE_ATTACH), which we'll catch later on. */
1083 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1084 }
1085
1086 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1087 brings it to a halt.
1088
1089 There are several cases to consider here:
1090
1091 1) gdbserver has already attached to the process and is being notified
1092 of a new thread that is being created.
1093 In this case we should ignore that SIGSTOP and resume the
1094 process. This is handled below by setting stop_expected = 1,
1095 and the fact that add_thread sets last_resume_kind ==
1096 resume_continue.
1097
1098 2) This is the first thread (the process thread), and we're attaching
1099 to it via attach_inferior.
1100 In this case we want the process thread to stop.
1101 This is handled by having linux_attach set last_resume_kind ==
1102 resume_stop after we return.
1103
1104 If the pid we are attaching to is also the tgid, we attach to and
1105 stop all the existing threads. Otherwise, we attach to pid and
1106 ignore any other threads in the same group as this pid.
1107
1108 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1109 existing threads.
1110 In this case we want the thread to stop.
1111 FIXME: This case is currently not properly handled.
1112 We should wait for the SIGSTOP but don't. Things work apparently
1113 because enough time passes between when we ptrace (ATTACH) and when
1114 gdb makes the next ptrace call on the thread.
1115
1116 On the other hand, if we are currently trying to stop all threads, we
1117 should treat the new thread as if we had sent it a SIGSTOP. This works
1118 because we are guaranteed that the add_lwp call above added us to the
1119 end of the list, and so the new thread has not yet reached
1120 wait_for_sigstop (but will). */
1121 new_lwp->stop_expected = 1;
1122
1123 return 0;
1124 }
1125
1126 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1127 already attached. Returns true if a new LWP is found, false
1128 otherwise. */
1129
1130 static int
1131 attach_proc_task_lwp_callback (ptid_t ptid)
1132 {
1133 /* Is this a new thread? */
1134 if (find_thread_ptid (ptid) == NULL)
1135 {
1136 int lwpid = ptid.lwp ();
1137 int err;
1138
1139 threads_debug_printf ("Found new lwp %d", lwpid);
1140
1141 err = the_linux_target->attach_lwp (ptid);
1142
1143 /* Be quiet if we simply raced with the thread exiting. EPERM
1144 is returned if the thread's task still exists, and is marked
1145 as exited or zombie, as well as other conditions, so in that
1146 case, confirm the status in /proc/PID/status. */
1147 if (err == ESRCH
1148 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1149 threads_debug_printf
1150 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1151 lwpid, err, safe_strerror (err));
1152 else if (err != 0)
1153 {
1154 std::string reason
1155 = linux_ptrace_attach_fail_reason_string (ptid, err);
1156
1157 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1158 }
1159
1160 return 1;
1161 }
1162 return 0;
1163 }
1164
1165 static void async_file_mark (void);
1166
1167 /* Attach to PID. If PID is the tgid, attach to it and all
1168 of its threads. */
1169
1170 int
1171 linux_process_target::attach (unsigned long pid)
1172 {
1173 struct process_info *proc;
1174 struct thread_info *initial_thread;
1175 ptid_t ptid = ptid_t (pid, pid);
1176 int err;
1177
1178 /* Delay opening the /proc/PID/mem file until we've successfully
1179 attached. */
1180 proc = add_linux_process_no_mem_file (pid, 1);
1181
1182 /* Attach to PID. We will check for other threads
1183 soon. */
1184 err = attach_lwp (ptid);
1185 if (err != 0)
1186 {
1187 this->remove_linux_process (proc);
1188
1189 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1190 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1191 }
1192
1193 open_proc_mem_file (proc);
1194
1195 /* Don't ignore the initial SIGSTOP if we just attached to this
1196 process. It will be collected by wait shortly. */
1197 initial_thread = find_thread_ptid (ptid_t (pid, pid));
1198 gdb_assert (initial_thread != nullptr);
1199 initial_thread->last_resume_kind = resume_stop;
1200
1201 /* We must attach to every LWP. If /proc is mounted, use that to
1202 find them now. On the one hand, the inferior may be using raw
1203 clone instead of using pthreads. On the other hand, even if it
1204 is using pthreads, GDB may not be connected yet (thread_db needs
1205 to do symbol lookups, through qSymbol). Also, thread_db walks
1206 structures in the inferior's address space to find the list of
1207 threads/LWPs, and those structures may well be corrupted. Note
1208 that once thread_db is loaded, we'll still use it to list threads
1209 and associate pthread info with each LWP. */
1210 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1211
1212 /* GDB will shortly read the xml target description for this
1213 process, to figure out the process' architecture. But the target
1214 description is only filled in when the first process/thread in
1215 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1216 that now, otherwise, if GDB is fast enough, it could read the
1217 target description _before_ that initial stop. */
1218 if (non_stop)
1219 {
1220 struct lwp_info *lwp;
1221 int wstat, lwpid;
1222 ptid_t pid_ptid = ptid_t (pid);
1223
1224 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1225 gdb_assert (lwpid > 0);
1226
1227 lwp = find_lwp_pid (ptid_t (lwpid));
1228 gdb_assert (lwp != nullptr);
1229
1230 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1231 {
1232 lwp->status_pending_p = 1;
1233 lwp->status_pending = wstat;
1234 }
1235
1236 initial_thread->last_resume_kind = resume_continue;
1237
1238 async_file_mark ();
1239
1240 gdb_assert (proc->tdesc != NULL);
1241 }
1242
1243 return 0;
1244 }
1245
1246 static int
1247 last_thread_of_process_p (int pid)
1248 {
1249 bool seen_one = false;
1250
1251 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1252 {
1253 if (!seen_one)
1254 {
1255 /* This is the first thread of this process we see. */
1256 seen_one = true;
1257 return false;
1258 }
1259 else
1260 {
1261 /* This is the second thread of this process we see. */
1262 return true;
1263 }
1264 });
1265
1266 return thread == NULL;
1267 }
1268
1269 /* Kill LWP. */
1270
1271 static void
1272 linux_kill_one_lwp (struct lwp_info *lwp)
1273 {
1274 struct thread_info *thr = get_lwp_thread (lwp);
1275 int pid = lwpid_of (thr);
1276
1277 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1278 there is no signal context, and ptrace(PTRACE_KILL) (or
1279 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1280 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1281 alternative is to kill with SIGKILL. We only need one SIGKILL
1282 per process, not one for each thread. But since we still support
1283 support debugging programs using raw clone without CLONE_THREAD,
1284 we send one for each thread. For years, we used PTRACE_KILL
1285 only, so we're being a bit paranoid about some old kernels where
1286 PTRACE_KILL might work better (dubious if there are any such, but
1287 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1288 second, and so we're fine everywhere. */
1289
1290 errno = 0;
1291 kill_lwp (pid, SIGKILL);
1292 if (debug_threads)
1293 {
1294 int save_errno = errno;
1295
1296 threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1297 target_pid_to_str (ptid_of (thr)).c_str (),
1298 save_errno ? safe_strerror (save_errno) : "OK");
1299 }
1300
1301 errno = 0;
1302 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1303 if (debug_threads)
1304 {
1305 int save_errno = errno;
1306
1307 threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1308 target_pid_to_str (ptid_of (thr)).c_str (),
1309 save_errno ? safe_strerror (save_errno) : "OK");
1310 }
1311 }
1312
1313 /* Kill LWP and wait for it to die. */
1314
1315 static void
1316 kill_wait_lwp (struct lwp_info *lwp)
1317 {
1318 struct thread_info *thr = get_lwp_thread (lwp);
1319 int pid = ptid_of (thr).pid ();
1320 int lwpid = ptid_of (thr).lwp ();
1321 int wstat;
1322 int res;
1323
1324 threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
1325
1326 do
1327 {
1328 linux_kill_one_lwp (lwp);
1329
1330 /* Make sure it died. Notes:
1331
1332 - The loop is most likely unnecessary.
1333
1334 - We don't use wait_for_event as that could delete lwps
1335 while we're iterating over them. We're not interested in
1336 any pending status at this point, only in making sure all
1337 wait status on the kernel side are collected until the
1338 process is reaped.
1339
1340 - We don't use __WALL here as the __WALL emulation relies on
1341 SIGCHLD, and killing a stopped process doesn't generate
1342 one, nor an exit status.
1343 */
1344 res = my_waitpid (lwpid, &wstat, 0);
1345 if (res == -1 && errno == ECHILD)
1346 res = my_waitpid (lwpid, &wstat, __WCLONE);
1347 } while (res > 0 && WIFSTOPPED (wstat));
1348
1349 /* Even if it was stopped, the child may have already disappeared.
1350 E.g., if it was killed by SIGKILL. */
1351 if (res < 0 && errno != ECHILD)
1352 perror_with_name ("kill_wait_lwp");
1353 }
1354
1355 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1356 except the leader. */
1357
1358 static void
1359 kill_one_lwp_callback (thread_info *thread, int pid)
1360 {
1361 struct lwp_info *lwp = get_thread_lwp (thread);
1362
1363 /* We avoid killing the first thread here, because of a Linux kernel (at
1364 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1365 the children get a chance to be reaped, it will remain a zombie
1366 forever. */
1367
1368 if (lwpid_of (thread) == pid)
1369 {
1370 threads_debug_printf ("is last of process %s",
1371 target_pid_to_str (thread->id).c_str ());
1372 return;
1373 }
1374
1375 kill_wait_lwp (lwp);
1376 }
1377
1378 int
1379 linux_process_target::kill (process_info *process)
1380 {
1381 int pid = process->pid;
1382
1383 /* If we're killing a running inferior, make sure it is stopped
1384 first, as PTRACE_KILL will not work otherwise. */
1385 stop_all_lwps (0, NULL);
1386
1387 for_each_thread (pid, [&] (thread_info *thread)
1388 {
1389 kill_one_lwp_callback (thread, pid);
1390 });
1391
1392 /* See the comment in linux_kill_one_lwp. We did not kill the first
1393 thread in the list, so do so now. */
1394 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1395
1396 if (lwp == NULL)
1397 threads_debug_printf ("cannot find lwp for pid: %d", pid);
1398 else
1399 kill_wait_lwp (lwp);
1400
1401 mourn (process);
1402
1403 /* Since we presently can only stop all lwps of all processes, we
1404 need to unstop lwps of other processes. */
1405 unstop_all_lwps (0, NULL);
1406 return 0;
1407 }
1408
1409 /* Get pending signal of THREAD, for detaching purposes. This is the
1410 signal the thread last stopped for, which we need to deliver to the
1411 thread when detaching, otherwise, it'd be suppressed/lost. */
1412
1413 static int
1414 get_detach_signal (struct thread_info *thread)
1415 {
1416 client_state &cs = get_client_state ();
1417 enum gdb_signal signo = GDB_SIGNAL_0;
1418 int status;
1419 struct lwp_info *lp = get_thread_lwp (thread);
1420
1421 if (lp->status_pending_p)
1422 status = lp->status_pending;
1423 else
1424 {
1425 /* If the thread had been suspended by gdbserver, and it stopped
1426 cleanly, then it'll have stopped with SIGSTOP. But we don't
1427 want to deliver that SIGSTOP. */
1428 if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1429 || thread->last_status.sig () == GDB_SIGNAL_0)
1430 return 0;
1431
1432 /* Otherwise, we may need to deliver the signal we
1433 intercepted. */
1434 status = lp->last_status;
1435 }
1436
1437 if (!WIFSTOPPED (status))
1438 {
1439 threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1440 target_pid_to_str (ptid_of (thread)).c_str ());
1441 return 0;
1442 }
1443
1444 /* Extended wait statuses aren't real SIGTRAPs. */
1445 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1446 {
1447 threads_debug_printf ("lwp %s had stopped with extended "
1448 "status: no pending signal",
1449 target_pid_to_str (ptid_of (thread)).c_str ());
1450 return 0;
1451 }
1452
1453 signo = gdb_signal_from_host (WSTOPSIG (status));
1454
1455 if (cs.program_signals_p && !cs.program_signals[signo])
1456 {
1457 threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1458 target_pid_to_str (ptid_of (thread)).c_str (),
1459 gdb_signal_to_string (signo));
1460 return 0;
1461 }
1462 else if (!cs.program_signals_p
1463 /* If we have no way to know which signals GDB does not
1464 want to have passed to the program, assume
1465 SIGTRAP/SIGINT, which is GDB's default. */
1466 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1467 {
1468 threads_debug_printf ("lwp %s had signal %s, "
1469 "but we don't know if we should pass it. "
1470 "Default to not.",
1471 target_pid_to_str (ptid_of (thread)).c_str (),
1472 gdb_signal_to_string (signo));
1473 return 0;
1474 }
1475 else
1476 {
1477 threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1478 target_pid_to_str (ptid_of (thread)).c_str (),
1479 gdb_signal_to_string (signo));
1480
1481 return WSTOPSIG (status);
1482 }
1483 }
1484
1485 void
1486 linux_process_target::detach_one_lwp (lwp_info *lwp)
1487 {
1488 struct thread_info *thread = get_lwp_thread (lwp);
1489 int sig;
1490 int lwpid;
1491
1492 /* If there is a pending SIGSTOP, get rid of it. */
1493 if (lwp->stop_expected)
1494 {
1495 threads_debug_printf ("Sending SIGCONT to %s",
1496 target_pid_to_str (ptid_of (thread)).c_str ());
1497
1498 kill_lwp (lwpid_of (thread), SIGCONT);
1499 lwp->stop_expected = 0;
1500 }
1501
1502 /* Pass on any pending signal for this thread. */
1503 sig = get_detach_signal (thread);
1504
1505 /* Preparing to resume may try to write registers, and fail if the
1506 lwp is zombie. If that happens, ignore the error. We'll handle
1507 it below, when detach fails with ESRCH. */
1508 try
1509 {
1510 /* Flush any pending changes to the process's registers. */
1511 regcache_invalidate_thread (thread);
1512
1513 /* Finally, let it resume. */
1514 low_prepare_to_resume (lwp);
1515 }
1516 catch (const gdb_exception_error &ex)
1517 {
1518 if (!check_ptrace_stopped_lwp_gone (lwp))
1519 throw;
1520 }
1521
1522 lwpid = lwpid_of (thread);
1523 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1524 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1525 {
1526 int save_errno = errno;
1527
1528 /* We know the thread exists, so ESRCH must mean the lwp is
1529 zombie. This can happen if one of the already-detached
1530 threads exits the whole thread group. In that case we're
1531 still attached, and must reap the lwp. */
1532 if (save_errno == ESRCH)
1533 {
1534 int ret, status;
1535
1536 ret = my_waitpid (lwpid, &status, __WALL);
1537 if (ret == -1)
1538 {
1539 warning (_("Couldn't reap LWP %d while detaching: %s"),
1540 lwpid, safe_strerror (errno));
1541 }
1542 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1543 {
1544 warning (_("Reaping LWP %d while detaching "
1545 "returned unexpected status 0x%x"),
1546 lwpid, status);
1547 }
1548 }
1549 else
1550 {
1551 error (_("Can't detach %s: %s"),
1552 target_pid_to_str (ptid_of (thread)).c_str (),
1553 safe_strerror (save_errno));
1554 }
1555 }
1556 else
1557 threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1558 target_pid_to_str (ptid_of (thread)).c_str (),
1559 strsignal (sig));
1560
1561 delete_lwp (lwp);
1562 }
1563
1564 int
1565 linux_process_target::detach (process_info *process)
1566 {
1567 struct lwp_info *main_lwp;
1568
1569 /* As there's a step over already in progress, let it finish first,
1570 otherwise nesting a stabilize_threads operation on top gets real
1571 messy. */
1572 complete_ongoing_step_over ();
1573
1574 /* Stop all threads before detaching. First, ptrace requires that
1575 the thread is stopped to successfully detach. Second, thread_db
1576 may need to uninstall thread event breakpoints from memory, which
1577 only works with a stopped process anyway. */
1578 stop_all_lwps (0, NULL);
1579
1580 #ifdef USE_THREAD_DB
1581 thread_db_detach (process);
1582 #endif
1583
1584 /* Stabilize threads (move out of jump pads). */
1585 target_stabilize_threads ();
1586
1587 /* Detach from the clone lwps first. If the thread group exits just
1588 while we're detaching, we must reap the clone lwps before we're
1589 able to reap the leader. */
1590 for_each_thread (process->pid, [this] (thread_info *thread)
1591 {
1592 /* We don't actually detach from the thread group leader just yet.
1593 If the thread group exits, we must reap the zombie clone lwps
1594 before we're able to reap the leader. */
1595 if (thread->id.pid () == thread->id.lwp ())
1596 return;
1597
1598 lwp_info *lwp = get_thread_lwp (thread);
1599 detach_one_lwp (lwp);
1600 });
1601
1602 main_lwp = find_lwp_pid (ptid_t (process->pid));
1603 gdb_assert (main_lwp != nullptr);
1604 detach_one_lwp (main_lwp);
1605
1606 mourn (process);
1607
1608 /* Since we presently can only stop all lwps of all processes, we
1609 need to unstop lwps of other processes. */
1610 unstop_all_lwps (0, NULL);
1611 return 0;
1612 }
1613
1614 /* Remove all LWPs that belong to process PROC from the lwp list. */
1615
1616 void
1617 linux_process_target::mourn (process_info *process)
1618 {
1619 #ifdef USE_THREAD_DB
1620 thread_db_mourn (process);
1621 #endif
1622
1623 for_each_thread (process->pid, [this] (thread_info *thread)
1624 {
1625 delete_lwp (get_thread_lwp (thread));
1626 });
1627
1628 this->remove_linux_process (process);
1629 }
1630
1631 void
1632 linux_process_target::join (int pid)
1633 {
1634 int status, ret;
1635
1636 do {
1637 ret = my_waitpid (pid, &status, 0);
1638 if (WIFEXITED (status) || WIFSIGNALED (status))
1639 break;
1640 } while (ret != -1 || errno != ECHILD);
1641 }
1642
1643 /* Return true if the given thread is still alive. */
1644
1645 bool
1646 linux_process_target::thread_alive (ptid_t ptid)
1647 {
1648 struct lwp_info *lwp = find_lwp_pid (ptid);
1649
1650 /* We assume we always know if a thread exits. If a whole process
1651 exited but we still haven't been able to report it to GDB, we'll
1652 hold on to the last lwp of the dead process. */
1653 if (lwp != NULL)
1654 return !lwp_is_marked_dead (lwp);
1655 else
1656 return 0;
1657 }
1658
1659 bool
1660 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1661 {
1662 struct lwp_info *lp = get_thread_lwp (thread);
1663
1664 if (!lp->status_pending_p)
1665 return 0;
1666
1667 if (thread->last_resume_kind != resume_stop
1668 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1669 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1670 {
1671 CORE_ADDR pc;
1672 int discard = 0;
1673
1674 gdb_assert (lp->last_status != 0);
1675
1676 pc = get_pc (lp);
1677
1678 scoped_restore_current_thread restore_thread;
1679 switch_to_thread (thread);
1680
1681 if (pc != lp->stop_pc)
1682 {
1683 threads_debug_printf ("PC of %ld changed",
1684 lwpid_of (thread));
1685 discard = 1;
1686 }
1687
1688 #if !USE_SIGTRAP_SIGINFO
1689 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1690 && !low_breakpoint_at (pc))
1691 {
1692 threads_debug_printf ("previous SW breakpoint of %ld gone",
1693 lwpid_of (thread));
1694 discard = 1;
1695 }
1696 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1697 && !hardware_breakpoint_inserted_here (pc))
1698 {
1699 threads_debug_printf ("previous HW breakpoint of %ld gone",
1700 lwpid_of (thread));
1701 discard = 1;
1702 }
1703 #endif
1704
1705 if (discard)
1706 {
1707 threads_debug_printf ("discarding pending breakpoint status");
1708 lp->status_pending_p = 0;
1709 return 0;
1710 }
1711 }
1712
1713 return 1;
1714 }
1715
1716 /* Returns true if LWP is resumed from the client's perspective. */
1717
1718 static int
1719 lwp_resumed (struct lwp_info *lwp)
1720 {
1721 struct thread_info *thread = get_lwp_thread (lwp);
1722
1723 if (thread->last_resume_kind != resume_stop)
1724 return 1;
1725
1726 /* Did gdb send us a `vCont;t', but we haven't reported the
1727 corresponding stop to gdb yet? If so, the thread is still
1728 resumed/running from gdb's perspective. */
1729 if (thread->last_resume_kind == resume_stop
1730 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1731 return 1;
1732
1733 return 0;
1734 }
1735
1736 bool
1737 linux_process_target::status_pending_p_callback (thread_info *thread,
1738 ptid_t ptid)
1739 {
1740 struct lwp_info *lp = get_thread_lwp (thread);
1741
1742 /* Check if we're only interested in events from a specific process
1743 or a specific LWP. */
1744 if (!thread->id.matches (ptid))
1745 return 0;
1746
1747 if (!lwp_resumed (lp))
1748 return 0;
1749
1750 if (lp->status_pending_p
1751 && !thread_still_has_status_pending (thread))
1752 {
1753 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1754 return 0;
1755 }
1756
1757 return lp->status_pending_p;
1758 }
1759
1760 struct lwp_info *
1761 find_lwp_pid (ptid_t ptid)
1762 {
1763 long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1764 thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1765 {
1766 return thr_arg->id.lwp () == lwp;
1767 });
1768
1769 if (thread == NULL)
1770 return NULL;
1771
1772 return get_thread_lwp (thread);
1773 }
1774
1775 /* Return the number of known LWPs in the tgid given by PID. */
1776
1777 static int
1778 num_lwps (int pid)
1779 {
1780 int count = 0;
1781
1782 for_each_thread (pid, [&] (thread_info *thread)
1783 {
1784 count++;
1785 });
1786
1787 return count;
1788 }
1789
1790 /* See nat/linux-nat.h. */
1791
1792 struct lwp_info *
1793 iterate_over_lwps (ptid_t filter,
1794 gdb::function_view<iterate_over_lwps_ftype> callback)
1795 {
1796 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1797 {
1798 lwp_info *lwp = get_thread_lwp (thr_arg);
1799
1800 return callback (lwp);
1801 });
1802
1803 if (thread == NULL)
1804 return NULL;
1805
1806 return get_thread_lwp (thread);
1807 }
1808
1809 bool
1810 linux_process_target::check_zombie_leaders ()
1811 {
1812 bool new_pending_event = false;
1813
1814 for_each_process ([&] (process_info *proc)
1815 {
1816 pid_t leader_pid = pid_of (proc);
1817 lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1818
1819 threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1820 "num_lwps=%d, zombie=%d",
1821 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1822 linux_proc_pid_is_zombie (leader_pid));
1823
1824 if (leader_lp != NULL && !leader_lp->stopped
1825 /* Check if there are other threads in the group, as we may
1826 have raced with the inferior simply exiting. Note this
1827 isn't a watertight check. If the inferior is
1828 multi-threaded and is exiting, it may be we see the
1829 leader as zombie before we reap all the non-leader
1830 threads. See comments below. */
1831 && !last_thread_of_process_p (leader_pid)
1832 && linux_proc_pid_is_zombie (leader_pid))
1833 {
1834 /* A zombie leader in a multi-threaded program can mean one
1835 of three things:
1836
1837 #1 - Only the leader exited, not the whole program, e.g.,
1838 with pthread_exit. Since we can't reap the leader's exit
1839 status until all other threads are gone and reaped too,
1840 we want to delete the zombie leader right away, as it
1841 can't be debugged, we can't read its registers, etc.
1842 This is the main reason we check for zombie leaders
1843 disappearing.
1844
1845 #2 - The whole thread-group/process exited (a group exit,
1846 via e.g. exit(3), and there is (or will be shortly) an
1847 exit reported for each thread in the process, and then
1848 finally an exit for the leader once the non-leaders are
1849 reaped.
1850
1851 #3 - There are 3 or more threads in the group, and a
1852 thread other than the leader exec'd. See comments on
1853 exec events at the top of the file.
1854
1855 Ideally we would never delete the leader for case #2.
1856 Instead, we want to collect the exit status of each
1857 non-leader thread, and then finally collect the exit
1858 status of the leader as normal and use its exit code as
1859 whole-process exit code. Unfortunately, there's no
1860 race-free way to distinguish cases #1 and #2. We can't
1861 assume the exit events for the non-leaders threads are
1862 already pending in the kernel, nor can we assume the
1863 non-leader threads are in zombie state already. Between
1864 the leader becoming zombie and the non-leaders exiting
1865 and becoming zombie themselves, there's a small time
1866 window, so such a check would be racy. Temporarily
1867 pausing all threads and checking to see if all threads
1868 exit or not before re-resuming them would work in the
1869 case that all threads are running right now, but it
1870 wouldn't work if some thread is currently already
1871 ptrace-stopped, e.g., due to scheduler-locking.
1872
1873 So what we do is we delete the leader anyhow, and then
1874 later on when we see its exit status, we re-add it back.
1875 We also make sure that we only report a whole-process
1876 exit when we see the leader exiting, as opposed to when
1877 the last LWP in the LWP list exits, which can be a
1878 non-leader if we deleted the leader here. */
1879 threads_debug_printf ("Thread group leader %d zombie "
1880 "(it exited, or another thread execd), "
1881 "deleting it.",
1882 leader_pid);
1883
1884 thread_info *leader_thread = get_lwp_thread (leader_lp);
1885 if (report_exit_events_for (leader_thread))
1886 {
1887 mark_lwp_dead (leader_lp, W_EXITCODE (0, 0), true);
1888 new_pending_event = true;
1889 }
1890 else
1891 delete_lwp (leader_lp);
1892 }
1893 });
1894
1895 return new_pending_event;
1896 }
1897
1898 /* Callback for `find_thread'. Returns the first LWP that is not
1899 stopped. */
1900
1901 static bool
1902 not_stopped_callback (thread_info *thread, ptid_t filter)
1903 {
1904 if (!thread->id.matches (filter))
1905 return false;
1906
1907 lwp_info *lwp = get_thread_lwp (thread);
1908
1909 return !lwp->stopped;
1910 }
1911
1912 /* Increment LWP's suspend count. */
1913
1914 static void
1915 lwp_suspended_inc (struct lwp_info *lwp)
1916 {
1917 lwp->suspended++;
1918
1919 if (lwp->suspended > 4)
1920 threads_debug_printf
1921 ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1922 lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1923 }
1924
1925 /* Decrement LWP's suspend count. */
1926
1927 static void
1928 lwp_suspended_decr (struct lwp_info *lwp)
1929 {
1930 lwp->suspended--;
1931
1932 if (lwp->suspended < 0)
1933 {
1934 struct thread_info *thread = get_lwp_thread (lwp);
1935
1936 internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1937 lwp->suspended);
1938 }
1939 }
1940
1941 /* This function should only be called if the LWP got a SIGTRAP.
1942
1943 Handle any tracepoint steps or hits. Return true if a tracepoint
1944 event was handled, 0 otherwise. */
1945
1946 static int
1947 handle_tracepoints (struct lwp_info *lwp)
1948 {
1949 struct thread_info *tinfo = get_lwp_thread (lwp);
1950 int tpoint_related_event = 0;
1951
1952 gdb_assert (lwp->suspended == 0);
1953
1954 /* If this tracepoint hit causes a tracing stop, we'll immediately
1955 uninsert tracepoints. To do this, we temporarily pause all
1956 threads, unpatch away, and then unpause threads. We need to make
1957 sure the unpausing doesn't resume LWP too. */
1958 lwp_suspended_inc (lwp);
1959
1960 /* And we need to be sure that any all-threads-stopping doesn't try
1961 to move threads out of the jump pads, as it could deadlock the
1962 inferior (LWP could be in the jump pad, maybe even holding the
1963 lock.) */
1964
1965 /* Do any necessary step collect actions. */
1966 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1967
1968 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1969
1970 /* See if we just hit a tracepoint and do its main collect
1971 actions. */
1972 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1973
1974 lwp_suspended_decr (lwp);
1975
1976 gdb_assert (lwp->suspended == 0);
1977 gdb_assert (!stabilizing_threads
1978 || (lwp->collecting_fast_tracepoint
1979 != fast_tpoint_collect_result::not_collecting));
1980
1981 if (tpoint_related_event)
1982 {
1983 threads_debug_printf ("got a tracepoint event");
1984 return 1;
1985 }
1986
1987 return 0;
1988 }
1989
1990 fast_tpoint_collect_result
1991 linux_process_target::linux_fast_tracepoint_collecting
1992 (lwp_info *lwp, fast_tpoint_collect_status *status)
1993 {
1994 CORE_ADDR thread_area;
1995 struct thread_info *thread = get_lwp_thread (lwp);
1996
1997 /* Get the thread area address. This is used to recognize which
1998 thread is which when tracing with the in-process agent library.
1999 We don't read anything from the address, and treat it as opaque;
2000 it's the address itself that we assume is unique per-thread. */
2001 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
2002 return fast_tpoint_collect_result::not_collecting;
2003
2004 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2005 }
2006
2007 int
2008 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
2009 {
2010 return -1;
2011 }
2012
2013 bool
2014 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2015 {
2016 scoped_restore_current_thread restore_thread;
2017 switch_to_thread (get_lwp_thread (lwp));
2018
2019 if ((wstat == NULL
2020 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2021 && supports_fast_tracepoints ()
2022 && agent_loaded_p ())
2023 {
2024 struct fast_tpoint_collect_status status;
2025
2026 threads_debug_printf
2027 ("Checking whether LWP %ld needs to move out of the jump pad.",
2028 lwpid_of (current_thread));
2029
2030 fast_tpoint_collect_result r
2031 = linux_fast_tracepoint_collecting (lwp, &status);
2032
2033 if (wstat == NULL
2034 || (WSTOPSIG (*wstat) != SIGILL
2035 && WSTOPSIG (*wstat) != SIGFPE
2036 && WSTOPSIG (*wstat) != SIGSEGV
2037 && WSTOPSIG (*wstat) != SIGBUS))
2038 {
2039 lwp->collecting_fast_tracepoint = r;
2040
2041 if (r != fast_tpoint_collect_result::not_collecting)
2042 {
2043 if (r == fast_tpoint_collect_result::before_insn
2044 && lwp->exit_jump_pad_bkpt == NULL)
2045 {
2046 /* Haven't executed the original instruction yet.
2047 Set breakpoint there, and wait till it's hit,
2048 then single-step until exiting the jump pad. */
2049 lwp->exit_jump_pad_bkpt
2050 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2051 }
2052
2053 threads_debug_printf
2054 ("Checking whether LWP %ld needs to move out of the jump pad..."
2055 " it does", lwpid_of (current_thread));
2056
2057 return true;
2058 }
2059 }
2060 else
2061 {
2062 /* If we get a synchronous signal while collecting, *and*
2063 while executing the (relocated) original instruction,
2064 reset the PC to point at the tpoint address, before
2065 reporting to GDB. Otherwise, it's an IPA lib bug: just
2066 report the signal to GDB, and pray for the best. */
2067
2068 lwp->collecting_fast_tracepoint
2069 = fast_tpoint_collect_result::not_collecting;
2070
2071 if (r != fast_tpoint_collect_result::not_collecting
2072 && (status.adjusted_insn_addr <= lwp->stop_pc
2073 && lwp->stop_pc < status.adjusted_insn_addr_end))
2074 {
2075 siginfo_t info;
2076 struct regcache *regcache;
2077
2078 /* The si_addr on a few signals references the address
2079 of the faulting instruction. Adjust that as
2080 well. */
2081 if ((WSTOPSIG (*wstat) == SIGILL
2082 || WSTOPSIG (*wstat) == SIGFPE
2083 || WSTOPSIG (*wstat) == SIGBUS
2084 || WSTOPSIG (*wstat) == SIGSEGV)
2085 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2086 (PTRACE_TYPE_ARG3) 0, &info) == 0
2087 /* Final check just to make sure we don't clobber
2088 the siginfo of non-kernel-sent signals. */
2089 && (uintptr_t) info.si_addr == lwp->stop_pc)
2090 {
2091 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2092 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2093 (PTRACE_TYPE_ARG3) 0, &info);
2094 }
2095
2096 regcache = get_thread_regcache (current_thread, 1);
2097 low_set_pc (regcache, status.tpoint_addr);
2098 lwp->stop_pc = status.tpoint_addr;
2099
2100 /* Cancel any fast tracepoint lock this thread was
2101 holding. */
2102 force_unlock_trace_buffer ();
2103 }
2104
2105 if (lwp->exit_jump_pad_bkpt != NULL)
2106 {
2107 threads_debug_printf
2108 ("Cancelling fast exit-jump-pad: removing bkpt."
2109 "stopping all threads momentarily.");
2110
2111 stop_all_lwps (1, lwp);
2112
2113 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2114 lwp->exit_jump_pad_bkpt = NULL;
2115
2116 unstop_all_lwps (1, lwp);
2117
2118 gdb_assert (lwp->suspended >= 0);
2119 }
2120 }
2121 }
2122
2123 threads_debug_printf
2124 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2125 lwpid_of (current_thread));
2126
2127 return false;
2128 }
2129
2130 /* Enqueue one signal in the "signals to report later when out of the
2131 jump pad" list. */
2132
2133 static void
2134 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2135 {
2136 struct thread_info *thread = get_lwp_thread (lwp);
2137
2138 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2139 WSTOPSIG (*wstat), lwpid_of (thread));
2140
2141 if (debug_threads)
2142 {
2143 for (const auto &sig : lwp->pending_signals_to_report)
2144 threads_debug_printf (" Already queued %d", sig.signal);
2145
2146 threads_debug_printf (" (no more currently queued signals)");
2147 }
2148
2149 /* Don't enqueue non-RT signals if they are already in the deferred
2150 queue. (SIGSTOP being the easiest signal to see ending up here
2151 twice) */
2152 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2153 {
2154 for (const auto &sig : lwp->pending_signals_to_report)
2155 {
2156 if (sig.signal == WSTOPSIG (*wstat))
2157 {
2158 threads_debug_printf
2159 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2160 sig.signal, lwpid_of (thread));
2161 return;
2162 }
2163 }
2164 }
2165
2166 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2167
2168 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2169 &lwp->pending_signals_to_report.back ().info);
2170 }
2171
2172 /* Dequeue one signal from the "signals to report later when out of
2173 the jump pad" list. */
2174
2175 static int
2176 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2177 {
2178 struct thread_info *thread = get_lwp_thread (lwp);
2179
2180 if (!lwp->pending_signals_to_report.empty ())
2181 {
2182 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2183
2184 *wstat = W_STOPCODE (p_sig.signal);
2185 if (p_sig.info.si_signo != 0)
2186 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2187 &p_sig.info);
2188
2189 lwp->pending_signals_to_report.pop_front ();
2190
2191 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2192 WSTOPSIG (*wstat), lwpid_of (thread));
2193
2194 if (debug_threads)
2195 {
2196 for (const auto &sig : lwp->pending_signals_to_report)
2197 threads_debug_printf (" Still queued %d", sig.signal);
2198
2199 threads_debug_printf (" (no more queued signals)");
2200 }
2201
2202 return 1;
2203 }
2204
2205 return 0;
2206 }
2207
2208 bool
2209 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2210 {
2211 scoped_restore_current_thread restore_thread;
2212 switch_to_thread (get_lwp_thread (child));
2213
2214 if (low_stopped_by_watchpoint ())
2215 {
2216 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2217 child->stopped_data_address = low_stopped_data_address ();
2218 }
2219
2220 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2221 }
2222
2223 bool
2224 linux_process_target::low_stopped_by_watchpoint ()
2225 {
2226 return false;
2227 }
2228
2229 CORE_ADDR
2230 linux_process_target::low_stopped_data_address ()
2231 {
2232 return 0;
2233 }
2234
2235 /* Return the ptrace options that we want to try to enable. */
2236
2237 static int
2238 linux_low_ptrace_options (int attached)
2239 {
2240 client_state &cs = get_client_state ();
2241 int options = 0;
2242
2243 if (!attached)
2244 options |= PTRACE_O_EXITKILL;
2245
2246 if (cs.report_fork_events)
2247 options |= PTRACE_O_TRACEFORK;
2248
2249 if (cs.report_vfork_events)
2250 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2251
2252 if (cs.report_exec_events)
2253 options |= PTRACE_O_TRACEEXEC;
2254
2255 options |= PTRACE_O_TRACESYSGOOD;
2256
2257 return options;
2258 }
2259
2260 void
2261 linux_process_target::filter_event (int lwpid, int wstat)
2262 {
2263 struct lwp_info *child;
2264 struct thread_info *thread;
2265 int have_stop_pc = 0;
2266
2267 child = find_lwp_pid (ptid_t (lwpid));
2268
2269 /* Check for events reported by anything not in our LWP list. */
2270 if (child == nullptr)
2271 {
2272 if (WIFSTOPPED (wstat))
2273 {
2274 if (WSTOPSIG (wstat) == SIGTRAP
2275 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2276 {
2277 /* A non-leader thread exec'ed after we've seen the
2278 leader zombie, and removed it from our lists (in
2279 check_zombie_leaders). The non-leader thread changes
2280 its tid to the tgid. */
2281 threads_debug_printf
2282 ("Re-adding thread group leader LWP %d after exec.",
2283 lwpid);
2284
2285 child = add_lwp (ptid_t (lwpid, lwpid));
2286 child->stopped = 1;
2287 switch_to_thread (child->thread);
2288 }
2289 else
2290 {
2291 /* A process we are controlling has forked and the new
2292 child's stop was reported to us by the kernel. Save
2293 its PID and go back to waiting for the fork event to
2294 be reported - the stopped process might be returned
2295 from waitpid before or after the fork event is. */
2296 threads_debug_printf
2297 ("Saving LWP %d status %s in stopped_pids list",
2298 lwpid, status_to_str (wstat).c_str ());
2299 add_to_pid_list (&stopped_pids, lwpid, wstat);
2300 }
2301 }
2302 else
2303 {
2304 /* Don't report an event for the exit of an LWP not in our
2305 list, i.e. not part of any inferior we're debugging.
2306 This can happen if we detach from a program we originally
2307 forked and then it exits. However, note that we may have
2308 earlier deleted a leader of an inferior we're debugging,
2309 in check_zombie_leaders. Re-add it back here if so. */
2310 find_process ([&] (process_info *proc)
2311 {
2312 if (proc->pid == lwpid)
2313 {
2314 threads_debug_printf
2315 ("Re-adding thread group leader LWP %d after exit.",
2316 lwpid);
2317
2318 child = add_lwp (ptid_t (lwpid, lwpid));
2319 return true;
2320 }
2321 return false;
2322 });
2323 }
2324
2325 if (child == nullptr)
2326 return;
2327 }
2328
2329 thread = get_lwp_thread (child);
2330
2331 child->stopped = 1;
2332
2333 child->last_status = wstat;
2334
2335 /* Check if the thread has exited. */
2336 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2337 {
2338 threads_debug_printf ("%d exited", lwpid);
2339
2340 if (finish_step_over (child))
2341 {
2342 /* Unsuspend all other LWPs, and set them back running again. */
2343 unsuspend_all_lwps (child);
2344 }
2345
2346 /* If this is not the leader LWP, then the exit signal was not
2347 the end of the debugged application and should be ignored,
2348 unless GDB wants to hear about thread exits. */
2349 if (report_exit_events_for (thread) || is_leader (thread))
2350 {
2351 /* Since events are serialized to GDB core, and we can't
2352 report this one right now. Leave the status pending for
2353 the next time we're able to report it. */
2354 mark_lwp_dead (child, wstat, false);
2355 return;
2356 }
2357 else
2358 {
2359 delete_lwp (child);
2360 return;
2361 }
2362 }
2363
2364 gdb_assert (WIFSTOPPED (wstat));
2365
2366 if (WIFSTOPPED (wstat))
2367 {
2368 struct process_info *proc;
2369
2370 /* Architecture-specific setup after inferior is running. */
2371 proc = find_process_pid (pid_of (thread));
2372 if (proc->tdesc == NULL)
2373 {
2374 if (proc->attached)
2375 {
2376 /* This needs to happen after we have attached to the
2377 inferior and it is stopped for the first time, but
2378 before we access any inferior registers. */
2379 arch_setup_thread (thread);
2380 }
2381 else
2382 {
2383 /* The process is started, but GDBserver will do
2384 architecture-specific setup after the program stops at
2385 the first instruction. */
2386 child->status_pending_p = 1;
2387 child->status_pending = wstat;
2388 return;
2389 }
2390 }
2391 }
2392
2393 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2394 {
2395 struct process_info *proc = find_process_pid (pid_of (thread));
2396 int options = linux_low_ptrace_options (proc->attached);
2397
2398 linux_enable_event_reporting (lwpid, options);
2399 child->must_set_ptrace_flags = 0;
2400 }
2401
2402 /* Always update syscall_state, even if it will be filtered later. */
2403 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2404 {
2405 child->syscall_state
2406 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2407 ? TARGET_WAITKIND_SYSCALL_RETURN
2408 : TARGET_WAITKIND_SYSCALL_ENTRY);
2409 }
2410 else
2411 {
2412 /* Almost all other ptrace-stops are known to be outside of system
2413 calls, with further exceptions in handle_extended_wait. */
2414 child->syscall_state = TARGET_WAITKIND_IGNORE;
2415 }
2416
2417 /* Be careful to not overwrite stop_pc until save_stop_reason is
2418 called. */
2419 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2420 && linux_is_extended_waitstatus (wstat))
2421 {
2422 child->stop_pc = get_pc (child);
2423 if (handle_extended_wait (&child, wstat))
2424 {
2425 /* The event has been handled, so just return without
2426 reporting it. */
2427 return;
2428 }
2429 }
2430
2431 if (linux_wstatus_maybe_breakpoint (wstat))
2432 {
2433 if (save_stop_reason (child))
2434 have_stop_pc = 1;
2435 }
2436
2437 if (!have_stop_pc)
2438 child->stop_pc = get_pc (child);
2439
2440 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2441 && child->stop_expected)
2442 {
2443 threads_debug_printf ("Expected stop.");
2444
2445 child->stop_expected = 0;
2446
2447 if (thread->last_resume_kind == resume_stop)
2448 {
2449 /* We want to report the stop to the core. Treat the
2450 SIGSTOP as a normal event. */
2451 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2452 target_pid_to_str (ptid_of (thread)).c_str ());
2453 }
2454 else if (stopping_threads != NOT_STOPPING_THREADS)
2455 {
2456 /* Stopping threads. We don't want this SIGSTOP to end up
2457 pending. */
2458 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2459 target_pid_to_str (ptid_of (thread)).c_str ());
2460 return;
2461 }
2462 else
2463 {
2464 /* This is a delayed SIGSTOP. Filter out the event. */
2465 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2466 child->stepping ? "step" : "continue",
2467 target_pid_to_str (ptid_of (thread)).c_str ());
2468
2469 resume_one_lwp (child, child->stepping, 0, NULL);
2470 return;
2471 }
2472 }
2473
2474 child->status_pending_p = 1;
2475 child->status_pending = wstat;
2476 return;
2477 }
2478
2479 bool
2480 linux_process_target::maybe_hw_step (thread_info *thread)
2481 {
2482 if (supports_hardware_single_step ())
2483 return true;
2484 else
2485 {
2486 /* GDBserver must insert single-step breakpoint for software
2487 single step. */
2488 gdb_assert (has_single_step_breakpoints (thread));
2489 return false;
2490 }
2491 }
2492
2493 void
2494 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2495 {
2496 struct lwp_info *lp = get_thread_lwp (thread);
2497
2498 if (lp->stopped
2499 && !lp->suspended
2500 && !lp->status_pending_p
2501 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2502 {
2503 int step = 0;
2504
2505 if (thread->last_resume_kind == resume_step)
2506 {
2507 if (supports_software_single_step ())
2508 install_software_single_step_breakpoints (lp);
2509
2510 step = maybe_hw_step (thread);
2511 }
2512
2513 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2514 target_pid_to_str (ptid_of (thread)).c_str (),
2515 paddress (lp->stop_pc), step);
2516
2517 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2518 }
2519 }
2520
2521 int
2522 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2523 ptid_t filter_ptid,
2524 int *wstatp, int options)
2525 {
2526 struct thread_info *event_thread;
2527 struct lwp_info *event_child, *requested_child;
2528 sigset_t block_mask, prev_mask;
2529
2530 retry:
2531 /* N.B. event_thread points to the thread_info struct that contains
2532 event_child. Keep them in sync. */
2533 event_thread = NULL;
2534 event_child = NULL;
2535 requested_child = NULL;
2536
2537 /* Check for a lwp with a pending status. */
2538
2539 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2540 {
2541 event_thread = find_thread_in_random ([&] (thread_info *thread)
2542 {
2543 return status_pending_p_callback (thread, filter_ptid);
2544 });
2545
2546 if (event_thread != NULL)
2547 {
2548 event_child = get_thread_lwp (event_thread);
2549 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2550 }
2551 }
2552 else if (filter_ptid != null_ptid)
2553 {
2554 requested_child = find_lwp_pid (filter_ptid);
2555 gdb_assert (requested_child != nullptr);
2556
2557 if (stopping_threads == NOT_STOPPING_THREADS
2558 && requested_child->status_pending_p
2559 && (requested_child->collecting_fast_tracepoint
2560 != fast_tpoint_collect_result::not_collecting))
2561 {
2562 enqueue_one_deferred_signal (requested_child,
2563 &requested_child->status_pending);
2564 requested_child->status_pending_p = 0;
2565 requested_child->status_pending = 0;
2566 resume_one_lwp (requested_child, 0, 0, NULL);
2567 }
2568
2569 if (requested_child->suspended
2570 && requested_child->status_pending_p)
2571 {
2572 internal_error ("requesting an event out of a"
2573 " suspended child?");
2574 }
2575
2576 if (requested_child->status_pending_p)
2577 {
2578 event_child = requested_child;
2579 event_thread = get_lwp_thread (event_child);
2580 }
2581 }
2582
2583 if (event_child != NULL)
2584 {
2585 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2586 lwpid_of (event_thread),
2587 event_child->status_pending);
2588
2589 *wstatp = event_child->status_pending;
2590 event_child->status_pending_p = 0;
2591 event_child->status_pending = 0;
2592 switch_to_thread (event_thread);
2593 return lwpid_of (event_thread);
2594 }
2595
2596 /* But if we don't find a pending event, we'll have to wait.
2597
2598 We only enter this loop if no process has a pending wait status.
2599 Thus any action taken in response to a wait status inside this
2600 loop is responding as soon as we detect the status, not after any
2601 pending events. */
2602
2603 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2604 all signals while here. */
2605 sigfillset (&block_mask);
2606 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2607
2608 /* Always pull all events out of the kernel. We'll randomly select
2609 an event LWP out of all that have events, to prevent
2610 starvation. */
2611 while (event_child == NULL)
2612 {
2613 pid_t ret = 0;
2614
2615 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2616 quirks:
2617
2618 - If the thread group leader exits while other threads in the
2619 thread group still exist, waitpid(TGID, ...) hangs. That
2620 waitpid won't return an exit status until the other threads
2621 in the group are reaped.
2622
2623 - When a non-leader thread execs, that thread just vanishes
2624 without reporting an exit (so we'd hang if we waited for it
2625 explicitly in that case). The exec event is reported to
2626 the TGID pid. */
2627 errno = 0;
2628 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2629
2630 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2631 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2632
2633 if (ret > 0)
2634 {
2635 threads_debug_printf ("waitpid %ld received %s",
2636 (long) ret, status_to_str (*wstatp).c_str ());
2637
2638 /* Filter all events. IOW, leave all events pending. We'll
2639 randomly select an event LWP out of all that have events
2640 below. */
2641 filter_event (ret, *wstatp);
2642 /* Retry until nothing comes out of waitpid. A single
2643 SIGCHLD can indicate more than one child stopped. */
2644 continue;
2645 }
2646
2647 /* Now that we've pulled all events out of the kernel, resume
2648 LWPs that don't have an interesting event to report. */
2649 if (stopping_threads == NOT_STOPPING_THREADS)
2650 for_each_thread ([this] (thread_info *thread)
2651 {
2652 resume_stopped_resumed_lwps (thread);
2653 });
2654
2655 /* ... and find an LWP with a status to report to the core, if
2656 any. */
2657 event_thread = find_thread_in_random ([&] (thread_info *thread)
2658 {
2659 return status_pending_p_callback (thread, filter_ptid);
2660 });
2661
2662 if (event_thread != NULL)
2663 {
2664 event_child = get_thread_lwp (event_thread);
2665 *wstatp = event_child->status_pending;
2666 event_child->status_pending_p = 0;
2667 event_child->status_pending = 0;
2668 break;
2669 }
2670
2671 /* Check for zombie thread group leaders. Those can't be reaped
2672 until all other threads in the thread group are. */
2673 if (check_zombie_leaders ())
2674 goto retry;
2675
2676 auto not_stopped = [&] (thread_info *thread)
2677 {
2678 return not_stopped_callback (thread, wait_ptid);
2679 };
2680
2681 /* If there are no resumed children left in the set of LWPs we
2682 want to wait for, bail. We can't just block in
2683 waitpid/sigsuspend, because lwps might have been left stopped
2684 in trace-stop state, and we'd be stuck forever waiting for
2685 their status to change (which would only happen if we resumed
2686 them). Even if WNOHANG is set, this return code is preferred
2687 over 0 (below), as it is more detailed. */
2688 if (find_thread (not_stopped) == NULL)
2689 {
2690 threads_debug_printf ("exit (no unwaited-for LWP)");
2691
2692 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2693 return -1;
2694 }
2695
2696 /* No interesting event to report to the caller. */
2697 if ((options & WNOHANG))
2698 {
2699 threads_debug_printf ("WNOHANG set, no event found");
2700
2701 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2702 return 0;
2703 }
2704
2705 /* Block until we get an event reported with SIGCHLD. */
2706 threads_debug_printf ("sigsuspend'ing");
2707
2708 sigsuspend (&prev_mask);
2709 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2710 goto retry;
2711 }
2712
2713 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2714
2715 switch_to_thread (event_thread);
2716
2717 return lwpid_of (event_thread);
2718 }
2719
2720 int
2721 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2722 {
2723 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2724 }
2725
2726 /* Select one LWP out of those that have events pending. */
2727
2728 static void
2729 select_event_lwp (struct lwp_info **orig_lp)
2730 {
2731 struct thread_info *event_thread = NULL;
2732
2733 /* In all-stop, give preference to the LWP that is being
2734 single-stepped. There will be at most one, and it's the LWP that
2735 the core is most interested in. If we didn't do this, then we'd
2736 have to handle pending step SIGTRAPs somehow in case the core
2737 later continues the previously-stepped thread, otherwise we'd
2738 report the pending SIGTRAP, and the core, not having stepped the
2739 thread, wouldn't understand what the trap was for, and therefore
2740 would report it to the user as a random signal. */
2741 if (!non_stop)
2742 {
2743 event_thread = find_thread ([] (thread_info *thread)
2744 {
2745 lwp_info *lp = get_thread_lwp (thread);
2746
2747 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2748 && thread->last_resume_kind == resume_step
2749 && lp->status_pending_p);
2750 });
2751
2752 if (event_thread != NULL)
2753 threads_debug_printf
2754 ("Select single-step %s",
2755 target_pid_to_str (ptid_of (event_thread)).c_str ());
2756 }
2757 if (event_thread == NULL)
2758 {
2759 /* No single-stepping LWP. Select one at random, out of those
2760 which have had events. */
2761
2762 event_thread = find_thread_in_random ([&] (thread_info *thread)
2763 {
2764 lwp_info *lp = get_thread_lwp (thread);
2765
2766 /* Only resumed LWPs that have an event pending. */
2767 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2768 && lp->status_pending_p);
2769 });
2770 }
2771
2772 if (event_thread != NULL)
2773 {
2774 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2775
2776 /* Switch the event LWP. */
2777 *orig_lp = event_lp;
2778 }
2779 }
2780
2781 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2782 NULL. */
2783
2784 static void
2785 unsuspend_all_lwps (struct lwp_info *except)
2786 {
2787 for_each_thread ([&] (thread_info *thread)
2788 {
2789 lwp_info *lwp = get_thread_lwp (thread);
2790
2791 if (lwp != except)
2792 lwp_suspended_decr (lwp);
2793 });
2794 }
2795
2796 static bool lwp_running (thread_info *thread);
2797
2798 /* Stabilize threads (move out of jump pads).
2799
2800 If a thread is midway collecting a fast tracepoint, we need to
2801 finish the collection and move it out of the jump pad before
2802 reporting the signal.
2803
2804 This avoids recursion while collecting (when a signal arrives
2805 midway, and the signal handler itself collects), which would trash
2806 the trace buffer. In case the user set a breakpoint in a signal
2807 handler, this avoids the backtrace showing the jump pad, etc..
2808 Most importantly, there are certain things we can't do safely if
2809 threads are stopped in a jump pad (or in its callee's). For
2810 example:
2811
2812 - starting a new trace run. A thread still collecting the
2813 previous run, could trash the trace buffer when resumed. The trace
2814 buffer control structures would have been reset but the thread had
2815 no way to tell. The thread could even midway memcpy'ing to the
2816 buffer, which would mean that when resumed, it would clobber the
2817 trace buffer that had been set for a new run.
2818
2819 - we can't rewrite/reuse the jump pads for new tracepoints
2820 safely. Say you do tstart while a thread is stopped midway while
2821 collecting. When the thread is later resumed, it finishes the
2822 collection, and returns to the jump pad, to execute the original
2823 instruction that was under the tracepoint jump at the time the
2824 older run had been started. If the jump pad had been rewritten
2825 since for something else in the new run, the thread would now
2826 execute the wrong / random instructions. */
2827
2828 void
2829 linux_process_target::stabilize_threads ()
2830 {
2831 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2832 {
2833 return stuck_in_jump_pad (thread);
2834 });
2835
2836 if (thread_stuck != NULL)
2837 {
2838 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2839 lwpid_of (thread_stuck));
2840 return;
2841 }
2842
2843 scoped_restore_current_thread restore_thread;
2844
2845 stabilizing_threads = 1;
2846
2847 /* Kick 'em all. */
2848 for_each_thread ([this] (thread_info *thread)
2849 {
2850 move_out_of_jump_pad (thread);
2851 });
2852
2853 /* Loop until all are stopped out of the jump pads. */
2854 while (find_thread (lwp_running) != NULL)
2855 {
2856 struct target_waitstatus ourstatus;
2857 struct lwp_info *lwp;
2858 int wstat;
2859
2860 /* Note that we go through the full wait even loop. While
2861 moving threads out of jump pad, we need to be able to step
2862 over internal breakpoints and such. */
2863 wait_1 (minus_one_ptid, &ourstatus, 0);
2864
2865 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2866 {
2867 lwp = get_thread_lwp (current_thread);
2868
2869 /* Lock it. */
2870 lwp_suspended_inc (lwp);
2871
2872 if (ourstatus.sig () != GDB_SIGNAL_0
2873 || current_thread->last_resume_kind == resume_stop)
2874 {
2875 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2876 enqueue_one_deferred_signal (lwp, &wstat);
2877 }
2878 }
2879 }
2880
2881 unsuspend_all_lwps (NULL);
2882
2883 stabilizing_threads = 0;
2884
2885 if (debug_threads)
2886 {
2887 thread_stuck = find_thread ([this] (thread_info *thread)
2888 {
2889 return stuck_in_jump_pad (thread);
2890 });
2891
2892 if (thread_stuck != NULL)
2893 threads_debug_printf
2894 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2895 lwpid_of (thread_stuck));
2896 }
2897 }
2898
2899 /* Convenience function that is called when the kernel reports an
2900 event that is not passed out to GDB. */
2901
2902 static ptid_t
2903 ignore_event (struct target_waitstatus *ourstatus)
2904 {
2905 /* If we got an event, there may still be others, as a single
2906 SIGCHLD can indicate more than one child stopped. This forces
2907 another target_wait call. */
2908 async_file_mark ();
2909
2910 ourstatus->set_ignore ();
2911 return null_ptid;
2912 }
2913
2914 ptid_t
2915 linux_process_target::filter_exit_event (lwp_info *event_child,
2916 target_waitstatus *ourstatus)
2917 {
2918 struct thread_info *thread = get_lwp_thread (event_child);
2919 ptid_t ptid = ptid_of (thread);
2920
2921 if (ourstatus->kind () == TARGET_WAITKIND_THREAD_EXITED)
2922 {
2923 /* We're reporting a thread exit for the leader. The exit was
2924 detected by check_zombie_leaders. */
2925 gdb_assert (is_leader (thread));
2926 gdb_assert (report_exit_events_for (thread));
2927
2928 delete_lwp (event_child);
2929 return ptid;
2930 }
2931
2932 /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
2933 if a non-leader thread exits with a signal, we'd report it to the
2934 core which would interpret it as the whole-process exiting.
2935 There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind. */
2936 if (ourstatus->kind () != TARGET_WAITKIND_EXITED
2937 && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
2938 return ptid;
2939
2940 if (!is_leader (thread))
2941 {
2942 if (report_exit_events_for (thread))
2943 ourstatus->set_thread_exited (0);
2944 else
2945 ourstatus->set_ignore ();
2946
2947 delete_lwp (event_child);
2948 }
2949 return ptid;
2950 }
2951
2952 /* Returns 1 if GDB is interested in any event_child syscalls. */
2953
2954 static int
2955 gdb_catching_syscalls_p (struct lwp_info *event_child)
2956 {
2957 struct thread_info *thread = get_lwp_thread (event_child);
2958 struct process_info *proc = get_thread_process (thread);
2959
2960 return !proc->syscalls_to_catch.empty ();
2961 }
2962
2963 bool
2964 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2965 {
2966 int sysno;
2967 struct thread_info *thread = get_lwp_thread (event_child);
2968 struct process_info *proc = get_thread_process (thread);
2969
2970 if (proc->syscalls_to_catch.empty ())
2971 return false;
2972
2973 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2974 return true;
2975
2976 get_syscall_trapinfo (event_child, &sysno);
2977
2978 for (int iter : proc->syscalls_to_catch)
2979 if (iter == sysno)
2980 return true;
2981
2982 return false;
2983 }
2984
2985 ptid_t
2986 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2987 target_wait_flags target_options)
2988 {
2989 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2990
2991 client_state &cs = get_client_state ();
2992 int w;
2993 struct lwp_info *event_child;
2994 int options;
2995 int pid;
2996 int step_over_finished;
2997 int bp_explains_trap;
2998 int maybe_internal_trap;
2999 int report_to_gdb;
3000 int trace_event;
3001 int in_step_range;
3002
3003 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
3004
3005 /* Translate generic target options into linux options. */
3006 options = __WALL;
3007 if (target_options & TARGET_WNOHANG)
3008 options |= WNOHANG;
3009
3010 bp_explains_trap = 0;
3011 trace_event = 0;
3012 in_step_range = 0;
3013 ourstatus->set_ignore ();
3014
3015 bool was_any_resumed = any_resumed ();
3016
3017 if (step_over_bkpt == null_ptid)
3018 pid = wait_for_event (ptid, &w, options);
3019 else
3020 {
3021 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
3022 target_pid_to_str (step_over_bkpt).c_str ());
3023 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3024 }
3025
3026 if (pid == 0 || (pid == -1 && !was_any_resumed))
3027 {
3028 gdb_assert (target_options & TARGET_WNOHANG);
3029
3030 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
3031
3032 ourstatus->set_ignore ();
3033 return null_ptid;
3034 }
3035 else if (pid == -1)
3036 {
3037 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
3038
3039 ourstatus->set_no_resumed ();
3040 return null_ptid;
3041 }
3042
3043 event_child = get_thread_lwp (current_thread);
3044
3045 /* wait_for_event only returns an exit status for the last
3046 child of a process. Report it. */
3047 if (WIFEXITED (w) || WIFSIGNALED (w))
3048 {
3049 if (WIFEXITED (w))
3050 {
3051 /* If we already have the exit recorded in waitstatus, use
3052 it. This will happen when we detect a zombie leader,
3053 when we had GDB_THREAD_OPTION_EXIT enabled for it. We
3054 want to report its exit as TARGET_WAITKIND_THREAD_EXITED,
3055 as the whole process hasn't exited yet. */
3056 const target_waitstatus &ws = event_child->waitstatus;
3057 if (ws.kind () != TARGET_WAITKIND_IGNORE)
3058 {
3059 gdb_assert (ws.kind () == TARGET_WAITKIND_EXITED
3060 || ws.kind () == TARGET_WAITKIND_THREAD_EXITED);
3061 *ourstatus = ws;
3062 }
3063 else
3064 ourstatus->set_exited (WEXITSTATUS (w));
3065
3066 threads_debug_printf
3067 ("ret = %s, exited with retcode %d",
3068 target_pid_to_str (ptid_of (current_thread)).c_str (),
3069 WEXITSTATUS (w));
3070 }
3071 else
3072 {
3073 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3074
3075 threads_debug_printf
3076 ("ret = %s, terminated with signal %d",
3077 target_pid_to_str (ptid_of (current_thread)).c_str (),
3078 WTERMSIG (w));
3079 }
3080
3081 return filter_exit_event (event_child, ourstatus);
3082 }
3083
3084 /* If step-over executes a breakpoint instruction, in the case of a
3085 hardware single step it means a gdb/gdbserver breakpoint had been
3086 planted on top of a permanent breakpoint, in the case of a software
3087 single step it may just mean that gdbserver hit the reinsert breakpoint.
3088 The PC has been adjusted by save_stop_reason to point at
3089 the breakpoint address.
3090 So in the case of the hardware single step advance the PC manually
3091 past the breakpoint and in the case of software single step advance only
3092 if it's not the single_step_breakpoint we are hitting.
3093 This avoids that a program would keep trapping a permanent breakpoint
3094 forever. */
3095 if (step_over_bkpt != null_ptid
3096 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3097 && (event_child->stepping
3098 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3099 {
3100 int increment_pc = 0;
3101 int breakpoint_kind = 0;
3102 CORE_ADDR stop_pc = event_child->stop_pc;
3103
3104 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3105 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3106
3107 threads_debug_printf
3108 ("step-over for %s executed software breakpoint",
3109 target_pid_to_str (ptid_of (current_thread)).c_str ());
3110
3111 if (increment_pc != 0)
3112 {
3113 struct regcache *regcache
3114 = get_thread_regcache (current_thread, 1);
3115
3116 event_child->stop_pc += increment_pc;
3117 low_set_pc (regcache, event_child->stop_pc);
3118
3119 if (!low_breakpoint_at (event_child->stop_pc))
3120 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3121 }
3122 }
3123
3124 /* If this event was not handled before, and is not a SIGTRAP, we
3125 report it. SIGILL and SIGSEGV are also treated as traps in case
3126 a breakpoint is inserted at the current PC. If this target does
3127 not support internal breakpoints at all, we also report the
3128 SIGTRAP without further processing; it's of no concern to us. */
3129 maybe_internal_trap
3130 = (low_supports_breakpoints ()
3131 && (WSTOPSIG (w) == SIGTRAP
3132 || ((WSTOPSIG (w) == SIGILL
3133 || WSTOPSIG (w) == SIGSEGV)
3134 && low_breakpoint_at (event_child->stop_pc))));
3135
3136 if (maybe_internal_trap)
3137 {
3138 /* Handle anything that requires bookkeeping before deciding to
3139 report the event or continue waiting. */
3140
3141 /* First check if we can explain the SIGTRAP with an internal
3142 breakpoint, or if we should possibly report the event to GDB.
3143 Do this before anything that may remove or insert a
3144 breakpoint. */
3145 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3146
3147 /* We have a SIGTRAP, possibly a step-over dance has just
3148 finished. If so, tweak the state machine accordingly,
3149 reinsert breakpoints and delete any single-step
3150 breakpoints. */
3151 step_over_finished = finish_step_over (event_child);
3152
3153 /* Now invoke the callbacks of any internal breakpoints there. */
3154 check_breakpoints (event_child->stop_pc);
3155
3156 /* Handle tracepoint data collecting. This may overflow the
3157 trace buffer, and cause a tracing stop, removing
3158 breakpoints. */
3159 trace_event = handle_tracepoints (event_child);
3160
3161 if (bp_explains_trap)
3162 threads_debug_printf ("Hit a gdbserver breakpoint.");
3163 }
3164 else
3165 {
3166 /* We have some other signal, possibly a step-over dance was in
3167 progress, and it should be cancelled too. */
3168 step_over_finished = finish_step_over (event_child);
3169 }
3170
3171 /* We have all the data we need. Either report the event to GDB, or
3172 resume threads and keep waiting for more. */
3173
3174 /* If we're collecting a fast tracepoint, finish the collection and
3175 move out of the jump pad before delivering a signal. See
3176 linux_stabilize_threads. */
3177
3178 if (WIFSTOPPED (w)
3179 && WSTOPSIG (w) != SIGTRAP
3180 && supports_fast_tracepoints ()
3181 && agent_loaded_p ())
3182 {
3183 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3184 "to defer or adjust it.",
3185 WSTOPSIG (w), lwpid_of (current_thread));
3186
3187 /* Allow debugging the jump pad itself. */
3188 if (current_thread->last_resume_kind != resume_step
3189 && maybe_move_out_of_jump_pad (event_child, &w))
3190 {
3191 enqueue_one_deferred_signal (event_child, &w);
3192
3193 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3194 WSTOPSIG (w), lwpid_of (current_thread));
3195
3196 resume_one_lwp (event_child, 0, 0, NULL);
3197
3198 return ignore_event (ourstatus);
3199 }
3200 }
3201
3202 if (event_child->collecting_fast_tracepoint
3203 != fast_tpoint_collect_result::not_collecting)
3204 {
3205 threads_debug_printf
3206 ("LWP %ld was trying to move out of the jump pad (%d). "
3207 "Check if we're already there.",
3208 lwpid_of (current_thread),
3209 (int) event_child->collecting_fast_tracepoint);
3210
3211 trace_event = 1;
3212
3213 event_child->collecting_fast_tracepoint
3214 = linux_fast_tracepoint_collecting (event_child, NULL);
3215
3216 if (event_child->collecting_fast_tracepoint
3217 != fast_tpoint_collect_result::before_insn)
3218 {
3219 /* No longer need this breakpoint. */
3220 if (event_child->exit_jump_pad_bkpt != NULL)
3221 {
3222 threads_debug_printf
3223 ("No longer need exit-jump-pad bkpt; removing it."
3224 "stopping all threads momentarily.");
3225
3226 /* Other running threads could hit this breakpoint.
3227 We don't handle moribund locations like GDB does,
3228 instead we always pause all threads when removing
3229 breakpoints, so that any step-over or
3230 decr_pc_after_break adjustment is always taken
3231 care of while the breakpoint is still
3232 inserted. */
3233 stop_all_lwps (1, event_child);
3234
3235 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3236 event_child->exit_jump_pad_bkpt = NULL;
3237
3238 unstop_all_lwps (1, event_child);
3239
3240 gdb_assert (event_child->suspended >= 0);
3241 }
3242 }
3243
3244 if (event_child->collecting_fast_tracepoint
3245 == fast_tpoint_collect_result::not_collecting)
3246 {
3247 threads_debug_printf
3248 ("fast tracepoint finished collecting successfully.");
3249
3250 /* We may have a deferred signal to report. */
3251 if (dequeue_one_deferred_signal (event_child, &w))
3252 threads_debug_printf ("dequeued one signal.");
3253 else
3254 {
3255 threads_debug_printf ("no deferred signals.");
3256
3257 if (stabilizing_threads)
3258 {
3259 ourstatus->set_stopped (GDB_SIGNAL_0);
3260
3261 threads_debug_printf
3262 ("ret = %s, stopped while stabilizing threads",
3263 target_pid_to_str (ptid_of (current_thread)).c_str ());
3264
3265 return ptid_of (current_thread);
3266 }
3267 }
3268 }
3269 }
3270
3271 /* Check whether GDB would be interested in this event. */
3272
3273 /* Check if GDB is interested in this syscall. */
3274 if (WIFSTOPPED (w)
3275 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3276 && !gdb_catch_this_syscall (event_child))
3277 {
3278 threads_debug_printf ("Ignored syscall for LWP %ld.",
3279 lwpid_of (current_thread));
3280
3281 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3282
3283 return ignore_event (ourstatus);
3284 }
3285
3286 /* If GDB is not interested in this signal, don't stop other
3287 threads, and don't report it to GDB. Just resume the inferior
3288 right away. We do this for threading-related signals as well as
3289 any that GDB specifically requested we ignore. But never ignore
3290 SIGSTOP if we sent it ourselves, and do not ignore signals when
3291 stepping - they may require special handling to skip the signal
3292 handler. Also never ignore signals that could be caused by a
3293 breakpoint. */
3294 if (WIFSTOPPED (w)
3295 && current_thread->last_resume_kind != resume_step
3296 && (
3297 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3298 (current_process ()->priv->thread_db != NULL
3299 && (WSTOPSIG (w) == __SIGRTMIN
3300 || WSTOPSIG (w) == __SIGRTMIN + 1))
3301 ||
3302 #endif
3303 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3304 && !(WSTOPSIG (w) == SIGSTOP
3305 && current_thread->last_resume_kind == resume_stop)
3306 && !linux_wstatus_maybe_breakpoint (w))))
3307 {
3308 siginfo_t info, *info_p;
3309
3310 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3311 WSTOPSIG (w), lwpid_of (current_thread));
3312
3313 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3314 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3315 info_p = &info;
3316 else
3317 info_p = NULL;
3318
3319 if (step_over_finished)
3320 {
3321 /* We cancelled this thread's step-over above. We still
3322 need to unsuspend all other LWPs, and set them back
3323 running again while the signal handler runs. */
3324 unsuspend_all_lwps (event_child);
3325
3326 /* Enqueue the pending signal info so that proceed_all_lwps
3327 doesn't lose it. */
3328 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3329
3330 proceed_all_lwps ();
3331 }
3332 else
3333 {
3334 resume_one_lwp (event_child, event_child->stepping,
3335 WSTOPSIG (w), info_p);
3336 }
3337
3338 return ignore_event (ourstatus);
3339 }
3340
3341 /* Note that all addresses are always "out of the step range" when
3342 there's no range to begin with. */
3343 in_step_range = lwp_in_step_range (event_child);
3344
3345 /* If GDB wanted this thread to single step, and the thread is out
3346 of the step range, we always want to report the SIGTRAP, and let
3347 GDB handle it. Watchpoints should always be reported. So should
3348 signals we can't explain. A SIGTRAP we can't explain could be a
3349 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3350 do, we're be able to handle GDB breakpoints on top of internal
3351 breakpoints, by handling the internal breakpoint and still
3352 reporting the event to GDB. If we don't, we're out of luck, GDB
3353 won't see the breakpoint hit. If we see a single-step event but
3354 the thread should be continuing, don't pass the trap to gdb.
3355 That indicates that we had previously finished a single-step but
3356 left the single-step pending -- see
3357 complete_ongoing_step_over. */
3358 report_to_gdb = (!maybe_internal_trap
3359 || (current_thread->last_resume_kind == resume_step
3360 && !in_step_range)
3361 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3362 || (!in_step_range
3363 && !bp_explains_trap
3364 && !trace_event
3365 && !step_over_finished
3366 && !(current_thread->last_resume_kind == resume_continue
3367 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3368 || (gdb_breakpoint_here (event_child->stop_pc)
3369 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3370 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3371 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3372
3373 run_breakpoint_commands (event_child->stop_pc);
3374
3375 /* We found no reason GDB would want us to stop. We either hit one
3376 of our own breakpoints, or finished an internal step GDB
3377 shouldn't know about. */
3378 if (!report_to_gdb)
3379 {
3380 if (bp_explains_trap)
3381 threads_debug_printf ("Hit a gdbserver breakpoint.");
3382
3383 if (step_over_finished)
3384 threads_debug_printf ("Step-over finished.");
3385
3386 if (trace_event)
3387 threads_debug_printf ("Tracepoint event.");
3388
3389 if (lwp_in_step_range (event_child))
3390 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3391 paddress (event_child->stop_pc),
3392 paddress (event_child->step_range_start),
3393 paddress (event_child->step_range_end));
3394
3395 /* We're not reporting this breakpoint to GDB, so apply the
3396 decr_pc_after_break adjustment to the inferior's regcache
3397 ourselves. */
3398
3399 if (low_supports_breakpoints ())
3400 {
3401 struct regcache *regcache
3402 = get_thread_regcache (current_thread, 1);
3403 low_set_pc (regcache, event_child->stop_pc);
3404 }
3405
3406 if (step_over_finished)
3407 {
3408 /* If we have finished stepping over a breakpoint, we've
3409 stopped and suspended all LWPs momentarily except the
3410 stepping one. This is where we resume them all again.
3411 We're going to keep waiting, so use proceed, which
3412 handles stepping over the next breakpoint. */
3413 unsuspend_all_lwps (event_child);
3414 }
3415 else
3416 {
3417 /* Remove the single-step breakpoints if any. Note that
3418 there isn't single-step breakpoint if we finished stepping
3419 over. */
3420 if (supports_software_single_step ()
3421 && has_single_step_breakpoints (current_thread))
3422 {
3423 stop_all_lwps (0, event_child);
3424 delete_single_step_breakpoints (current_thread);
3425 unstop_all_lwps (0, event_child);
3426 }
3427 }
3428
3429 threads_debug_printf ("proceeding all threads.");
3430
3431 proceed_all_lwps ();
3432
3433 return ignore_event (ourstatus);
3434 }
3435
3436 if (debug_threads)
3437 {
3438 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3439 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3440 lwpid_of (get_lwp_thread (event_child)),
3441 event_child->waitstatus.to_string ().c_str ());
3442
3443 if (current_thread->last_resume_kind == resume_step)
3444 {
3445 if (event_child->step_range_start == event_child->step_range_end)
3446 threads_debug_printf
3447 ("GDB wanted to single-step, reporting event.");
3448 else if (!lwp_in_step_range (event_child))
3449 threads_debug_printf ("Out of step range, reporting event.");
3450 }
3451
3452 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3453 threads_debug_printf ("Stopped by watchpoint.");
3454 else if (gdb_breakpoint_here (event_child->stop_pc))
3455 threads_debug_printf ("Stopped by GDB breakpoint.");
3456 }
3457
3458 threads_debug_printf ("Hit a non-gdbserver trap event.");
3459
3460 /* Alright, we're going to report a stop. */
3461
3462 /* Remove single-step breakpoints. */
3463 if (supports_software_single_step ())
3464 {
3465 /* Remove single-step breakpoints or not. It it is true, stop all
3466 lwps, so that other threads won't hit the breakpoint in the
3467 staled memory. */
3468 int remove_single_step_breakpoints_p = 0;
3469
3470 if (non_stop)
3471 {
3472 remove_single_step_breakpoints_p
3473 = has_single_step_breakpoints (current_thread);
3474 }
3475 else
3476 {
3477 /* In all-stop, a stop reply cancels all previous resume
3478 requests. Delete all single-step breakpoints. */
3479
3480 find_thread ([&] (thread_info *thread) {
3481 if (has_single_step_breakpoints (thread))
3482 {
3483 remove_single_step_breakpoints_p = 1;
3484 return true;
3485 }
3486
3487 return false;
3488 });
3489 }
3490
3491 if (remove_single_step_breakpoints_p)
3492 {
3493 /* If we remove single-step breakpoints from memory, stop all lwps,
3494 so that other threads won't hit the breakpoint in the staled
3495 memory. */
3496 stop_all_lwps (0, event_child);
3497
3498 if (non_stop)
3499 {
3500 gdb_assert (has_single_step_breakpoints (current_thread));
3501 delete_single_step_breakpoints (current_thread);
3502 }
3503 else
3504 {
3505 for_each_thread ([] (thread_info *thread){
3506 if (has_single_step_breakpoints (thread))
3507 delete_single_step_breakpoints (thread);
3508 });
3509 }
3510
3511 unstop_all_lwps (0, event_child);
3512 }
3513 }
3514
3515 if (!stabilizing_threads)
3516 {
3517 /* In all-stop, stop all threads. */
3518 if (!non_stop)
3519 stop_all_lwps (0, NULL);
3520
3521 if (step_over_finished)
3522 {
3523 if (!non_stop)
3524 {
3525 /* If we were doing a step-over, all other threads but
3526 the stepping one had been paused in start_step_over,
3527 with their suspend counts incremented. We don't want
3528 to do a full unstop/unpause, because we're in
3529 all-stop mode (so we want threads stopped), but we
3530 still need to unsuspend the other threads, to
3531 decrement their `suspended' count back. */
3532 unsuspend_all_lwps (event_child);
3533 }
3534 else
3535 {
3536 /* If we just finished a step-over, then all threads had
3537 been momentarily paused. In all-stop, that's fine,
3538 we want threads stopped by now anyway. In non-stop,
3539 we need to re-resume threads that GDB wanted to be
3540 running. */
3541 unstop_all_lwps (1, event_child);
3542 }
3543 }
3544
3545 /* If we're not waiting for a specific LWP, choose an event LWP
3546 from among those that have had events. Giving equal priority
3547 to all LWPs that have had events helps prevent
3548 starvation. */
3549 if (ptid == minus_one_ptid)
3550 {
3551 event_child->status_pending_p = 1;
3552 event_child->status_pending = w;
3553
3554 select_event_lwp (&event_child);
3555
3556 /* current_thread and event_child must stay in sync. */
3557 switch_to_thread (get_lwp_thread (event_child));
3558
3559 event_child->status_pending_p = 0;
3560 w = event_child->status_pending;
3561 }
3562
3563
3564 /* Stabilize threads (move out of jump pads). */
3565 if (!non_stop)
3566 target_stabilize_threads ();
3567 }
3568 else
3569 {
3570 /* If we just finished a step-over, then all threads had been
3571 momentarily paused. In all-stop, that's fine, we want
3572 threads stopped by now anyway. In non-stop, we need to
3573 re-resume threads that GDB wanted to be running. */
3574 if (step_over_finished)
3575 unstop_all_lwps (1, event_child);
3576 }
3577
3578 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3579 gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3580
3581 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3582 {
3583 /* If the reported event is an exit, fork, vfork, clone or exec,
3584 let GDB know. */
3585
3586 /* Break the unreported fork/vfork/clone relationship chain. */
3587 if (is_new_child_status (event_child->waitstatus.kind ()))
3588 {
3589 event_child->relative->relative = NULL;
3590 event_child->relative = NULL;
3591 }
3592
3593 *ourstatus = event_child->waitstatus;
3594 /* Clear the event lwp's waitstatus since we handled it already. */
3595 event_child->waitstatus.set_ignore ();
3596 }
3597 else
3598 {
3599 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3600 event_child->waitstatus wasn't filled in with the details, so look at
3601 the wait status W. */
3602 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3603 {
3604 int syscall_number;
3605
3606 get_syscall_trapinfo (event_child, &syscall_number);
3607 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3608 ourstatus->set_syscall_entry (syscall_number);
3609 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3610 ourstatus->set_syscall_return (syscall_number);
3611 else
3612 gdb_assert_not_reached ("unexpected syscall state");
3613 }
3614 else if (current_thread->last_resume_kind == resume_stop
3615 && WSTOPSIG (w) == SIGSTOP)
3616 {
3617 /* A thread that has been requested to stop by GDB with vCont;t,
3618 and it stopped cleanly, so report as SIG0. The use of
3619 SIGSTOP is an implementation detail. */
3620 ourstatus->set_stopped (GDB_SIGNAL_0);
3621 }
3622 else
3623 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3624 }
3625
3626 /* Now that we've selected our final event LWP, un-adjust its PC if
3627 it was a software breakpoint, and the client doesn't know we can
3628 adjust the breakpoint ourselves. */
3629 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3630 && !cs.swbreak_feature)
3631 {
3632 int decr_pc = low_decr_pc_after_break ();
3633
3634 if (decr_pc != 0)
3635 {
3636 struct regcache *regcache
3637 = get_thread_regcache (current_thread, 1);
3638 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3639 }
3640 }
3641
3642 gdb_assert (step_over_bkpt == null_ptid);
3643
3644 threads_debug_printf ("ret = %s, %s",
3645 target_pid_to_str (ptid_of (current_thread)).c_str (),
3646 ourstatus->to_string ().c_str ());
3647
3648 return filter_exit_event (event_child, ourstatus);
3649 }
3650
3651 /* Get rid of any pending event in the pipe. */
3652 static void
3653 async_file_flush (void)
3654 {
3655 linux_event_pipe.flush ();
3656 }
3657
3658 /* Put something in the pipe, so the event loop wakes up. */
3659 static void
3660 async_file_mark (void)
3661 {
3662 linux_event_pipe.mark ();
3663 }
3664
3665 ptid_t
3666 linux_process_target::wait (ptid_t ptid,
3667 target_waitstatus *ourstatus,
3668 target_wait_flags target_options)
3669 {
3670 ptid_t event_ptid;
3671
3672 /* Flush the async file first. */
3673 if (target_is_async_p ())
3674 async_file_flush ();
3675
3676 do
3677 {
3678 event_ptid = wait_1 (ptid, ourstatus, target_options);
3679 }
3680 while ((target_options & TARGET_WNOHANG) == 0
3681 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3682
3683 /* If at least one stop was reported, there may be more. A single
3684 SIGCHLD can signal more than one child stop. */
3685 if (target_is_async_p ()
3686 && (target_options & TARGET_WNOHANG) != 0
3687 && event_ptid != null_ptid)
3688 async_file_mark ();
3689
3690 return event_ptid;
3691 }
3692
3693 /* Send a signal to an LWP. */
3694
3695 static int
3696 kill_lwp (unsigned long lwpid, int signo)
3697 {
3698 int ret;
3699
3700 errno = 0;
3701 ret = syscall (__NR_tkill, lwpid, signo);
3702 if (errno == ENOSYS)
3703 {
3704 /* If tkill fails, then we are not using nptl threads, a
3705 configuration we no longer support. */
3706 perror_with_name (("tkill"));
3707 }
3708 return ret;
3709 }
3710
3711 void
3712 linux_stop_lwp (struct lwp_info *lwp)
3713 {
3714 send_sigstop (lwp);
3715 }
3716
3717 static void
3718 send_sigstop (struct lwp_info *lwp)
3719 {
3720 int pid;
3721
3722 pid = lwpid_of (get_lwp_thread (lwp));
3723
3724 /* If we already have a pending stop signal for this process, don't
3725 send another. */
3726 if (lwp->stop_expected)
3727 {
3728 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3729
3730 return;
3731 }
3732
3733 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3734
3735 lwp->stop_expected = 1;
3736 kill_lwp (pid, SIGSTOP);
3737 }
3738
3739 static void
3740 send_sigstop (thread_info *thread, lwp_info *except)
3741 {
3742 struct lwp_info *lwp = get_thread_lwp (thread);
3743
3744 /* Ignore EXCEPT. */
3745 if (lwp == except)
3746 return;
3747
3748 if (lwp->stopped)
3749 return;
3750
3751 send_sigstop (lwp);
3752 }
3753
3754 /* Increment the suspend count of an LWP, and stop it, if not stopped
3755 yet. */
3756 static void
3757 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3758 {
3759 struct lwp_info *lwp = get_thread_lwp (thread);
3760
3761 /* Ignore EXCEPT. */
3762 if (lwp == except)
3763 return;
3764
3765 lwp_suspended_inc (lwp);
3766
3767 send_sigstop (thread, except);
3768 }
3769
3770 /* Mark LWP dead, with WSTAT as exit status pending to report later.
3771 If THREAD_EVENT is true, interpret WSTAT as a thread exit event
3772 instead of a process exit event. This is meaningful for the leader
3773 thread, as we normally report a process-wide exit event when we see
3774 the leader exit, and a thread exit event when we see any other
3775 thread exit. */
3776
3777 static void
3778 mark_lwp_dead (struct lwp_info *lwp, int wstat, bool thread_event)
3779 {
3780 /* Store the exit status for later. */
3781 lwp->status_pending_p = 1;
3782 lwp->status_pending = wstat;
3783
3784 /* Store in waitstatus as well, as there's nothing else to process
3785 for this event. */
3786 if (WIFEXITED (wstat))
3787 {
3788 if (thread_event)
3789 lwp->waitstatus.set_thread_exited (WEXITSTATUS (wstat));
3790 else
3791 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3792 }
3793 else if (WIFSIGNALED (wstat))
3794 {
3795 gdb_assert (!thread_event);
3796 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3797 }
3798 else
3799 gdb_assert_not_reached ("unknown status kind");
3800
3801 /* Prevent trying to stop it. */
3802 lwp->stopped = 1;
3803
3804 /* No further stops are expected from a dead lwp. */
3805 lwp->stop_expected = 0;
3806 }
3807
3808 /* Return true if LWP has exited already, and has a pending exit event
3809 to report to GDB. */
3810
3811 static int
3812 lwp_is_marked_dead (struct lwp_info *lwp)
3813 {
3814 return (lwp->status_pending_p
3815 && (WIFEXITED (lwp->status_pending)
3816 || WIFSIGNALED (lwp->status_pending)));
3817 }
3818
3819 void
3820 linux_process_target::wait_for_sigstop ()
3821 {
3822 struct thread_info *saved_thread;
3823 ptid_t saved_tid;
3824 int wstat;
3825 int ret;
3826
3827 saved_thread = current_thread;
3828 if (saved_thread != NULL)
3829 saved_tid = saved_thread->id;
3830 else
3831 saved_tid = null_ptid; /* avoid bogus unused warning */
3832
3833 scoped_restore_current_thread restore_thread;
3834
3835 threads_debug_printf ("pulling events");
3836
3837 /* Passing NULL_PTID as filter indicates we want all events to be
3838 left pending. Eventually this returns when there are no
3839 unwaited-for children left. */
3840 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3841 gdb_assert (ret == -1);
3842
3843 if (saved_thread == NULL || mythread_alive (saved_tid))
3844 return;
3845 else
3846 {
3847 threads_debug_printf ("Previously current thread died.");
3848
3849 /* We can't change the current inferior behind GDB's back,
3850 otherwise, a subsequent command may apply to the wrong
3851 process. */
3852 restore_thread.dont_restore ();
3853 switch_to_thread (nullptr);
3854 }
3855 }
3856
3857 bool
3858 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3859 {
3860 struct lwp_info *lwp = get_thread_lwp (thread);
3861
3862 if (lwp->suspended != 0)
3863 {
3864 internal_error ("LWP %ld is suspended, suspended=%d\n",
3865 lwpid_of (thread), lwp->suspended);
3866 }
3867 gdb_assert (lwp->stopped);
3868
3869 /* Allow debugging the jump pad, gdb_collect, etc.. */
3870 return (supports_fast_tracepoints ()
3871 && agent_loaded_p ()
3872 && (gdb_breakpoint_here (lwp->stop_pc)
3873 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3874 || thread->last_resume_kind == resume_step)
3875 && (linux_fast_tracepoint_collecting (lwp, NULL)
3876 != fast_tpoint_collect_result::not_collecting));
3877 }
3878
3879 void
3880 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3881 {
3882 struct lwp_info *lwp = get_thread_lwp (thread);
3883 int *wstat;
3884
3885 if (lwp->suspended != 0)
3886 {
3887 internal_error ("LWP %ld is suspended, suspended=%d\n",
3888 lwpid_of (thread), lwp->suspended);
3889 }
3890 gdb_assert (lwp->stopped);
3891
3892 /* For gdb_breakpoint_here. */
3893 scoped_restore_current_thread restore_thread;
3894 switch_to_thread (thread);
3895
3896 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3897
3898 /* Allow debugging the jump pad, gdb_collect, etc. */
3899 if (!gdb_breakpoint_here (lwp->stop_pc)
3900 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3901 && thread->last_resume_kind != resume_step
3902 && maybe_move_out_of_jump_pad (lwp, wstat))
3903 {
3904 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3905 lwpid_of (thread));
3906
3907 if (wstat)
3908 {
3909 lwp->status_pending_p = 0;
3910 enqueue_one_deferred_signal (lwp, wstat);
3911
3912 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3913 WSTOPSIG (*wstat), lwpid_of (thread));
3914 }
3915
3916 resume_one_lwp (lwp, 0, 0, NULL);
3917 }
3918 else
3919 lwp_suspended_inc (lwp);
3920 }
3921
3922 static bool
3923 lwp_running (thread_info *thread)
3924 {
3925 struct lwp_info *lwp = get_thread_lwp (thread);
3926
3927 if (lwp_is_marked_dead (lwp))
3928 return false;
3929
3930 return !lwp->stopped;
3931 }
3932
3933 void
3934 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3935 {
3936 /* Should not be called recursively. */
3937 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3938
3939 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3940
3941 threads_debug_printf
3942 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3943 (except != NULL
3944 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3945 : "none"));
3946
3947 stopping_threads = (suspend
3948 ? STOPPING_AND_SUSPENDING_THREADS
3949 : STOPPING_THREADS);
3950
3951 if (suspend)
3952 for_each_thread ([&] (thread_info *thread)
3953 {
3954 suspend_and_send_sigstop (thread, except);
3955 });
3956 else
3957 for_each_thread ([&] (thread_info *thread)
3958 {
3959 send_sigstop (thread, except);
3960 });
3961
3962 wait_for_sigstop ();
3963 stopping_threads = NOT_STOPPING_THREADS;
3964
3965 threads_debug_printf ("setting stopping_threads back to !stopping");
3966 }
3967
3968 /* Enqueue one signal in the chain of signals which need to be
3969 delivered to this process on next resume. */
3970
3971 static void
3972 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3973 {
3974 lwp->pending_signals.emplace_back (signal);
3975 if (info == nullptr)
3976 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3977 else
3978 lwp->pending_signals.back ().info = *info;
3979 }
3980
3981 void
3982 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3983 {
3984 struct thread_info *thread = get_lwp_thread (lwp);
3985 struct regcache *regcache = get_thread_regcache (thread, 1);
3986
3987 scoped_restore_current_thread restore_thread;
3988
3989 switch_to_thread (thread);
3990 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3991
3992 for (CORE_ADDR pc : next_pcs)
3993 set_single_step_breakpoint (pc, current_ptid);
3994 }
3995
3996 int
3997 linux_process_target::single_step (lwp_info* lwp)
3998 {
3999 int step = 0;
4000
4001 if (supports_hardware_single_step ())
4002 {
4003 step = 1;
4004 }
4005 else if (supports_software_single_step ())
4006 {
4007 install_software_single_step_breakpoints (lwp);
4008 step = 0;
4009 }
4010 else
4011 threads_debug_printf ("stepping is not implemented on this target");
4012
4013 return step;
4014 }
4015
4016 /* The signal can be delivered to the inferior if we are not trying to
4017 finish a fast tracepoint collect. Since signal can be delivered in
4018 the step-over, the program may go to signal handler and trap again
4019 after return from the signal handler. We can live with the spurious
4020 double traps. */
4021
4022 static int
4023 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4024 {
4025 return (lwp->collecting_fast_tracepoint
4026 == fast_tpoint_collect_result::not_collecting);
4027 }
4028
4029 void
4030 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4031 int signal, siginfo_t *info)
4032 {
4033 struct thread_info *thread = get_lwp_thread (lwp);
4034 int ptrace_request;
4035 struct process_info *proc = get_thread_process (thread);
4036
4037 /* Note that target description may not be initialised
4038 (proc->tdesc == NULL) at this point because the program hasn't
4039 stopped at the first instruction yet. It means GDBserver skips
4040 the extra traps from the wrapper program (see option --wrapper).
4041 Code in this function that requires register access should be
4042 guarded by proc->tdesc == NULL or something else. */
4043
4044 if (lwp->stopped == 0)
4045 return;
4046
4047 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
4048
4049 fast_tpoint_collect_result fast_tp_collecting
4050 = lwp->collecting_fast_tracepoint;
4051
4052 gdb_assert (!stabilizing_threads
4053 || (fast_tp_collecting
4054 != fast_tpoint_collect_result::not_collecting));
4055
4056 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4057 user used the "jump" command, or "set $pc = foo"). */
4058 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4059 {
4060 /* Collecting 'while-stepping' actions doesn't make sense
4061 anymore. */
4062 release_while_stepping_state_list (thread);
4063 }
4064
4065 /* If we have pending signals or status, and a new signal, enqueue the
4066 signal. Also enqueue the signal if it can't be delivered to the
4067 inferior right now. */
4068 if (signal != 0
4069 && (lwp->status_pending_p
4070 || !lwp->pending_signals.empty ()
4071 || !lwp_signal_can_be_delivered (lwp)))
4072 {
4073 enqueue_pending_signal (lwp, signal, info);
4074
4075 /* Postpone any pending signal. It was enqueued above. */
4076 signal = 0;
4077 }
4078
4079 if (lwp->status_pending_p)
4080 {
4081 threads_debug_printf
4082 ("Not resuming lwp %ld (%s, stop %s); has pending status",
4083 lwpid_of (thread), step ? "step" : "continue",
4084 lwp->stop_expected ? "expected" : "not expected");
4085 return;
4086 }
4087
4088 scoped_restore_current_thread restore_thread;
4089 switch_to_thread (thread);
4090
4091 /* This bit needs some thinking about. If we get a signal that
4092 we must report while a single-step reinsert is still pending,
4093 we often end up resuming the thread. It might be better to
4094 (ew) allow a stack of pending events; then we could be sure that
4095 the reinsert happened right away and not lose any signals.
4096
4097 Making this stack would also shrink the window in which breakpoints are
4098 uninserted (see comment in linux_wait_for_lwp) but not enough for
4099 complete correctness, so it won't solve that problem. It may be
4100 worthwhile just to solve this one, however. */
4101 if (lwp->bp_reinsert != 0)
4102 {
4103 threads_debug_printf (" pending reinsert at 0x%s",
4104 paddress (lwp->bp_reinsert));
4105
4106 if (supports_hardware_single_step ())
4107 {
4108 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4109 {
4110 if (step == 0)
4111 warning ("BAD - reinserting but not stepping.");
4112 if (lwp->suspended)
4113 warning ("BAD - reinserting and suspended(%d).",
4114 lwp->suspended);
4115 }
4116 }
4117
4118 step = maybe_hw_step (thread);
4119 }
4120
4121 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4122 threads_debug_printf
4123 ("lwp %ld wants to get out of fast tracepoint jump pad "
4124 "(exit-jump-pad-bkpt)", lwpid_of (thread));
4125
4126 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4127 {
4128 threads_debug_printf
4129 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4130 lwpid_of (thread));
4131
4132 if (supports_hardware_single_step ())
4133 step = 1;
4134 else
4135 {
4136 internal_error ("moving out of jump pad single-stepping"
4137 " not implemented on this target");
4138 }
4139 }
4140
4141 /* If we have while-stepping actions in this thread set it stepping.
4142 If we have a signal to deliver, it may or may not be set to
4143 SIG_IGN, we don't know. Assume so, and allow collecting
4144 while-stepping into a signal handler. A possible smart thing to
4145 do would be to set an internal breakpoint at the signal return
4146 address, continue, and carry on catching this while-stepping
4147 action only when that breakpoint is hit. A future
4148 enhancement. */
4149 if (thread->while_stepping != NULL)
4150 {
4151 threads_debug_printf
4152 ("lwp %ld has a while-stepping action -> forcing step.",
4153 lwpid_of (thread));
4154
4155 step = single_step (lwp);
4156 }
4157
4158 if (proc->tdesc != NULL && low_supports_breakpoints ())
4159 {
4160 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4161
4162 lwp->stop_pc = low_get_pc (regcache);
4163
4164 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4165 (long) lwp->stop_pc);
4166 }
4167
4168 /* If we have pending signals, consume one if it can be delivered to
4169 the inferior. */
4170 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4171 {
4172 const pending_signal &p_sig = lwp->pending_signals.front ();
4173
4174 signal = p_sig.signal;
4175 if (p_sig.info.si_signo != 0)
4176 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4177 &p_sig.info);
4178
4179 lwp->pending_signals.pop_front ();
4180 }
4181
4182 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4183 lwpid_of (thread), step ? "step" : "continue", signal,
4184 lwp->stop_expected ? "expected" : "not expected");
4185
4186 low_prepare_to_resume (lwp);
4187
4188 regcache_invalidate_thread (thread);
4189 errno = 0;
4190 lwp->stepping = step;
4191 if (step)
4192 ptrace_request = PTRACE_SINGLESTEP;
4193 else if (gdb_catching_syscalls_p (lwp))
4194 ptrace_request = PTRACE_SYSCALL;
4195 else
4196 ptrace_request = PTRACE_CONT;
4197 ptrace (ptrace_request,
4198 lwpid_of (thread),
4199 (PTRACE_TYPE_ARG3) 0,
4200 /* Coerce to a uintptr_t first to avoid potential gcc warning
4201 of coercing an 8 byte integer to a 4 byte pointer. */
4202 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4203
4204 if (errno)
4205 {
4206 int saved_errno = errno;
4207
4208 threads_debug_printf ("ptrace errno = %d (%s)",
4209 saved_errno, strerror (saved_errno));
4210
4211 errno = saved_errno;
4212 perror_with_name ("resuming thread");
4213 }
4214
4215 /* Successfully resumed. Clear state that no longer makes sense,
4216 and mark the LWP as running. Must not do this before resuming
4217 otherwise if that fails other code will be confused. E.g., we'd
4218 later try to stop the LWP and hang forever waiting for a stop
4219 status. Note that we must not throw after this is cleared,
4220 otherwise handle_zombie_lwp_error would get confused. */
4221 lwp->stopped = 0;
4222 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4223 }
4224
4225 void
4226 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4227 {
4228 /* Nop. */
4229 }
4230
4231 /* Called when we try to resume a stopped LWP and that errors out. If
4232 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4233 or about to become), discard the error, clear any pending status
4234 the LWP may have, and return true (we'll collect the exit status
4235 soon enough). Otherwise, return false. */
4236
4237 static int
4238 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4239 {
4240 struct thread_info *thread = get_lwp_thread (lp);
4241
4242 /* If we get an error after resuming the LWP successfully, we'd
4243 confuse !T state for the LWP being gone. */
4244 gdb_assert (lp->stopped);
4245
4246 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4247 because even if ptrace failed with ESRCH, the tracee may be "not
4248 yet fully dead", but already refusing ptrace requests. In that
4249 case the tracee has 'R (Running)' state for a little bit
4250 (observed in Linux 3.18). See also the note on ESRCH in the
4251 ptrace(2) man page. Instead, check whether the LWP has any state
4252 other than ptrace-stopped. */
4253
4254 /* Don't assume anything if /proc/PID/status can't be read. */
4255 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4256 {
4257 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4258 lp->status_pending_p = 0;
4259 return 1;
4260 }
4261 return 0;
4262 }
4263
4264 void
4265 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4266 siginfo_t *info)
4267 {
4268 try
4269 {
4270 resume_one_lwp_throw (lwp, step, signal, info);
4271 }
4272 catch (const gdb_exception_error &ex)
4273 {
4274 if (check_ptrace_stopped_lwp_gone (lwp))
4275 {
4276 /* This could because we tried to resume an LWP after its leader
4277 exited. Mark it as resumed, so we can collect an exit event
4278 from it. */
4279 lwp->stopped = 0;
4280 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4281 }
4282 else
4283 throw;
4284 }
4285 }
4286
4287 /* This function is called once per thread via for_each_thread.
4288 We look up which resume request applies to THREAD and mark it with a
4289 pointer to the appropriate resume request.
4290
4291 This algorithm is O(threads * resume elements), but resume elements
4292 is small (and will remain small at least until GDB supports thread
4293 suspension). */
4294
4295 static void
4296 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4297 {
4298 struct lwp_info *lwp = get_thread_lwp (thread);
4299
4300 for (int ndx = 0; ndx < n; ndx++)
4301 {
4302 ptid_t ptid = resume[ndx].thread;
4303 if (ptid == minus_one_ptid
4304 || ptid == thread->id
4305 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4306 of PID'. */
4307 || (ptid.pid () == pid_of (thread)
4308 && (ptid.is_pid ()
4309 || ptid.lwp () == -1)))
4310 {
4311 if (resume[ndx].kind == resume_stop
4312 && thread->last_resume_kind == resume_stop)
4313 {
4314 threads_debug_printf
4315 ("already %s LWP %ld at GDB's request",
4316 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4317 ? "stopped" : "stopping"),
4318 lwpid_of (thread));
4319
4320 continue;
4321 }
4322
4323 /* Ignore (wildcard) resume requests for already-resumed
4324 threads. */
4325 if (resume[ndx].kind != resume_stop
4326 && thread->last_resume_kind != resume_stop)
4327 {
4328 threads_debug_printf
4329 ("already %s LWP %ld at GDB's request",
4330 (thread->last_resume_kind == resume_step
4331 ? "stepping" : "continuing"),
4332 lwpid_of (thread));
4333 continue;
4334 }
4335
4336 /* Don't let wildcard resumes resume fork/vfork/clone
4337 children that GDB does not yet know are new children. */
4338 if (lwp->relative != NULL)
4339 {
4340 struct lwp_info *rel = lwp->relative;
4341
4342 if (rel->status_pending_p
4343 && is_new_child_status (rel->waitstatus.kind ()))
4344 {
4345 threads_debug_printf
4346 ("not resuming LWP %ld: has queued stop reply",
4347 lwpid_of (thread));
4348 continue;
4349 }
4350 }
4351
4352 /* If the thread has a pending event that has already been
4353 reported to GDBserver core, but GDB has not pulled the
4354 event out of the vStopped queue yet, likewise, ignore the
4355 (wildcard) resume request. */
4356 if (in_queued_stop_replies (thread->id))
4357 {
4358 threads_debug_printf
4359 ("not resuming LWP %ld: has queued stop reply",
4360 lwpid_of (thread));
4361 continue;
4362 }
4363
4364 lwp->resume = &resume[ndx];
4365 thread->last_resume_kind = lwp->resume->kind;
4366
4367 lwp->step_range_start = lwp->resume->step_range_start;
4368 lwp->step_range_end = lwp->resume->step_range_end;
4369
4370 /* If we had a deferred signal to report, dequeue one now.
4371 This can happen if LWP gets more than one signal while
4372 trying to get out of a jump pad. */
4373 if (lwp->stopped
4374 && !lwp->status_pending_p
4375 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4376 {
4377 lwp->status_pending_p = 1;
4378
4379 threads_debug_printf
4380 ("Dequeueing deferred signal %d for LWP %ld, "
4381 "leaving status pending.",
4382 WSTOPSIG (lwp->status_pending),
4383 lwpid_of (thread));
4384 }
4385
4386 return;
4387 }
4388 }
4389
4390 /* No resume action for this thread. */
4391 lwp->resume = NULL;
4392 }
4393
4394 bool
4395 linux_process_target::resume_status_pending (thread_info *thread)
4396 {
4397 struct lwp_info *lwp = get_thread_lwp (thread);
4398
4399 /* LWPs which will not be resumed are not interesting, because
4400 we might not wait for them next time through linux_wait. */
4401 if (lwp->resume == NULL)
4402 return false;
4403
4404 return thread_still_has_status_pending (thread);
4405 }
4406
4407 bool
4408 linux_process_target::thread_needs_step_over (thread_info *thread)
4409 {
4410 struct lwp_info *lwp = get_thread_lwp (thread);
4411 CORE_ADDR pc;
4412 struct process_info *proc = get_thread_process (thread);
4413
4414 /* GDBserver is skipping the extra traps from the wrapper program,
4415 don't have to do step over. */
4416 if (proc->tdesc == NULL)
4417 return false;
4418
4419 /* LWPs which will not be resumed are not interesting, because we
4420 might not wait for them next time through linux_wait. */
4421
4422 if (!lwp->stopped)
4423 {
4424 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4425 lwpid_of (thread));
4426 return false;
4427 }
4428
4429 if (thread->last_resume_kind == resume_stop)
4430 {
4431 threads_debug_printf
4432 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4433 lwpid_of (thread));
4434 return false;
4435 }
4436
4437 gdb_assert (lwp->suspended >= 0);
4438
4439 if (lwp->suspended)
4440 {
4441 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4442 lwpid_of (thread));
4443 return false;
4444 }
4445
4446 if (lwp->status_pending_p)
4447 {
4448 threads_debug_printf
4449 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4450 lwpid_of (thread));
4451 return false;
4452 }
4453
4454 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4455 or we have. */
4456 pc = get_pc (lwp);
4457
4458 /* If the PC has changed since we stopped, then don't do anything,
4459 and let the breakpoint/tracepoint be hit. This happens if, for
4460 instance, GDB handled the decr_pc_after_break subtraction itself,
4461 GDB is OOL stepping this thread, or the user has issued a "jump"
4462 command, or poked thread's registers herself. */
4463 if (pc != lwp->stop_pc)
4464 {
4465 threads_debug_printf
4466 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4467 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4468 paddress (lwp->stop_pc), paddress (pc));
4469 return false;
4470 }
4471
4472 /* On software single step target, resume the inferior with signal
4473 rather than stepping over. */
4474 if (supports_software_single_step ()
4475 && !lwp->pending_signals.empty ()
4476 && lwp_signal_can_be_delivered (lwp))
4477 {
4478 threads_debug_printf
4479 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4480 lwpid_of (thread));
4481
4482 return false;
4483 }
4484
4485 scoped_restore_current_thread restore_thread;
4486 switch_to_thread (thread);
4487
4488 /* We can only step over breakpoints we know about. */
4489 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4490 {
4491 /* Don't step over a breakpoint that GDB expects to hit
4492 though. If the condition is being evaluated on the target's side
4493 and it evaluate to false, step over this breakpoint as well. */
4494 if (gdb_breakpoint_here (pc)
4495 && gdb_condition_true_at_breakpoint (pc)
4496 && gdb_no_commands_at_breakpoint (pc))
4497 {
4498 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4499 " GDB breakpoint at 0x%s; skipping step over",
4500 lwpid_of (thread), paddress (pc));
4501
4502 return false;
4503 }
4504 else
4505 {
4506 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4507 "found breakpoint at 0x%s",
4508 lwpid_of (thread), paddress (pc));
4509
4510 /* We've found an lwp that needs stepping over --- return 1 so
4511 that find_thread stops looking. */
4512 return true;
4513 }
4514 }
4515
4516 threads_debug_printf
4517 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4518 lwpid_of (thread), paddress (pc));
4519
4520 return false;
4521 }
4522
4523 void
4524 linux_process_target::start_step_over (lwp_info *lwp)
4525 {
4526 struct thread_info *thread = get_lwp_thread (lwp);
4527 CORE_ADDR pc;
4528
4529 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4530 lwpid_of (thread));
4531
4532 stop_all_lwps (1, lwp);
4533
4534 if (lwp->suspended != 0)
4535 {
4536 internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
4537 lwp->suspended);
4538 }
4539
4540 threads_debug_printf ("Done stopping all threads for step-over.");
4541
4542 /* Note, we should always reach here with an already adjusted PC,
4543 either by GDB (if we're resuming due to GDB's request), or by our
4544 caller, if we just finished handling an internal breakpoint GDB
4545 shouldn't care about. */
4546 pc = get_pc (lwp);
4547
4548 bool step = false;
4549 {
4550 scoped_restore_current_thread restore_thread;
4551 switch_to_thread (thread);
4552
4553 lwp->bp_reinsert = pc;
4554 uninsert_breakpoints_at (pc);
4555 uninsert_fast_tracepoint_jumps_at (pc);
4556
4557 step = single_step (lwp);
4558 }
4559
4560 resume_one_lwp (lwp, step, 0, NULL);
4561
4562 /* Require next event from this LWP. */
4563 step_over_bkpt = thread->id;
4564 }
4565
4566 bool
4567 linux_process_target::finish_step_over (lwp_info *lwp)
4568 {
4569 if (lwp->bp_reinsert != 0)
4570 {
4571 scoped_restore_current_thread restore_thread;
4572
4573 threads_debug_printf ("Finished step over.");
4574
4575 switch_to_thread (get_lwp_thread (lwp));
4576
4577 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4578 may be no breakpoint to reinsert there by now. */
4579 reinsert_breakpoints_at (lwp->bp_reinsert);
4580 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4581
4582 lwp->bp_reinsert = 0;
4583
4584 /* Delete any single-step breakpoints. No longer needed. We
4585 don't have to worry about other threads hitting this trap,
4586 and later not being able to explain it, because we were
4587 stepping over a breakpoint, and we hold all threads but
4588 LWP stopped while doing that. */
4589 if (!supports_hardware_single_step ())
4590 {
4591 gdb_assert (has_single_step_breakpoints (current_thread));
4592 delete_single_step_breakpoints (current_thread);
4593 }
4594
4595 step_over_bkpt = null_ptid;
4596 return true;
4597 }
4598 else
4599 return false;
4600 }
4601
4602 void
4603 linux_process_target::complete_ongoing_step_over ()
4604 {
4605 if (step_over_bkpt != null_ptid)
4606 {
4607 struct lwp_info *lwp;
4608 int wstat;
4609 int ret;
4610
4611 threads_debug_printf ("detach: step over in progress, finish it first");
4612
4613 /* Passing NULL_PTID as filter indicates we want all events to
4614 be left pending. Eventually this returns when there are no
4615 unwaited-for children left. */
4616 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4617 __WALL);
4618 gdb_assert (ret == -1);
4619
4620 lwp = find_lwp_pid (step_over_bkpt);
4621 if (lwp != NULL)
4622 {
4623 finish_step_over (lwp);
4624
4625 /* If we got our step SIGTRAP, don't leave it pending,
4626 otherwise we would report it to GDB as a spurious
4627 SIGTRAP. */
4628 gdb_assert (lwp->status_pending_p);
4629 if (WIFSTOPPED (lwp->status_pending)
4630 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4631 {
4632 thread_info *thread = get_lwp_thread (lwp);
4633 if (thread->last_resume_kind != resume_step)
4634 {
4635 threads_debug_printf ("detach: discard step-over SIGTRAP");
4636
4637 lwp->status_pending_p = 0;
4638 lwp->status_pending = 0;
4639 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4640 }
4641 else
4642 threads_debug_printf
4643 ("detach: resume_step, not discarding step-over SIGTRAP");
4644 }
4645 }
4646 step_over_bkpt = null_ptid;
4647 unsuspend_all_lwps (lwp);
4648 }
4649 }
4650
4651 void
4652 linux_process_target::resume_one_thread (thread_info *thread,
4653 bool leave_all_stopped)
4654 {
4655 struct lwp_info *lwp = get_thread_lwp (thread);
4656 int leave_pending;
4657
4658 if (lwp->resume == NULL)
4659 return;
4660
4661 if (lwp->resume->kind == resume_stop)
4662 {
4663 threads_debug_printf ("resume_stop request for LWP %ld",
4664 lwpid_of (thread));
4665
4666 if (!lwp->stopped)
4667 {
4668 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4669
4670 /* Stop the thread, and wait for the event asynchronously,
4671 through the event loop. */
4672 send_sigstop (lwp);
4673 }
4674 else
4675 {
4676 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4677
4678 /* The LWP may have been stopped in an internal event that
4679 was not meant to be notified back to GDB (e.g., gdbserver
4680 breakpoint), so we should be reporting a stop event in
4681 this case too. */
4682
4683 /* If the thread already has a pending SIGSTOP, this is a
4684 no-op. Otherwise, something later will presumably resume
4685 the thread and this will cause it to cancel any pending
4686 operation, due to last_resume_kind == resume_stop. If
4687 the thread already has a pending status to report, we
4688 will still report it the next time we wait - see
4689 status_pending_p_callback. */
4690
4691 /* If we already have a pending signal to report, then
4692 there's no need to queue a SIGSTOP, as this means we're
4693 midway through moving the LWP out of the jumppad, and we
4694 will report the pending signal as soon as that is
4695 finished. */
4696 if (lwp->pending_signals_to_report.empty ())
4697 send_sigstop (lwp);
4698 }
4699
4700 /* For stop requests, we're done. */
4701 lwp->resume = NULL;
4702 thread->last_status.set_ignore ();
4703 return;
4704 }
4705
4706 /* If this thread which is about to be resumed has a pending status,
4707 then don't resume it - we can just report the pending status.
4708 Likewise if it is suspended, because e.g., another thread is
4709 stepping past a breakpoint. Make sure to queue any signals that
4710 would otherwise be sent. In all-stop mode, we do this decision
4711 based on if *any* thread has a pending status. If there's a
4712 thread that needs the step-over-breakpoint dance, then don't
4713 resume any other thread but that particular one. */
4714 leave_pending = (lwp->suspended
4715 || lwp->status_pending_p
4716 || leave_all_stopped);
4717
4718 /* If we have a new signal, enqueue the signal. */
4719 if (lwp->resume->sig != 0)
4720 {
4721 siginfo_t info, *info_p;
4722
4723 /* If this is the same signal we were previously stopped by,
4724 make sure to queue its siginfo. */
4725 if (WIFSTOPPED (lwp->last_status)
4726 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4727 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4728 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4729 info_p = &info;
4730 else
4731 info_p = NULL;
4732
4733 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4734 }
4735
4736 if (!leave_pending)
4737 {
4738 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4739
4740 proceed_one_lwp (thread, NULL);
4741 }
4742 else
4743 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4744
4745 thread->last_status.set_ignore ();
4746 lwp->resume = NULL;
4747 }
4748
4749 void
4750 linux_process_target::resume (thread_resume *resume_info, size_t n)
4751 {
4752 struct thread_info *need_step_over = NULL;
4753
4754 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4755
4756 for_each_thread ([&] (thread_info *thread)
4757 {
4758 linux_set_resume_request (thread, resume_info, n);
4759 });
4760
4761 /* If there is a thread which would otherwise be resumed, which has
4762 a pending status, then don't resume any threads - we can just
4763 report the pending status. Make sure to queue any signals that
4764 would otherwise be sent. In non-stop mode, we'll apply this
4765 logic to each thread individually. We consume all pending events
4766 before considering to start a step-over (in all-stop). */
4767 bool any_pending = false;
4768 if (!non_stop)
4769 any_pending = find_thread ([this] (thread_info *thread)
4770 {
4771 return resume_status_pending (thread);
4772 }) != nullptr;
4773
4774 /* If there is a thread which would otherwise be resumed, which is
4775 stopped at a breakpoint that needs stepping over, then don't
4776 resume any threads - have it step over the breakpoint with all
4777 other threads stopped, then resume all threads again. Make sure
4778 to queue any signals that would otherwise be delivered or
4779 queued. */
4780 if (!any_pending && low_supports_breakpoints ())
4781 need_step_over = find_thread ([this] (thread_info *thread)
4782 {
4783 return thread_needs_step_over (thread);
4784 });
4785
4786 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4787
4788 if (need_step_over != NULL)
4789 threads_debug_printf ("Not resuming all, need step over");
4790 else if (any_pending)
4791 threads_debug_printf ("Not resuming, all-stop and found "
4792 "an LWP with pending status");
4793 else
4794 threads_debug_printf ("Resuming, no pending status or step over needed");
4795
4796 /* Even if we're leaving threads stopped, queue all signals we'd
4797 otherwise deliver. */
4798 for_each_thread ([&] (thread_info *thread)
4799 {
4800 resume_one_thread (thread, leave_all_stopped);
4801 });
4802
4803 if (need_step_over)
4804 start_step_over (get_thread_lwp (need_step_over));
4805
4806 /* We may have events that were pending that can/should be sent to
4807 the client now. Trigger a linux_wait call. */
4808 if (target_is_async_p ())
4809 async_file_mark ();
4810 }
4811
4812 void
4813 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4814 {
4815 struct lwp_info *lwp = get_thread_lwp (thread);
4816 int step;
4817
4818 if (lwp == except)
4819 return;
4820
4821 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4822
4823 if (!lwp->stopped)
4824 {
4825 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4826 return;
4827 }
4828
4829 if (thread->last_resume_kind == resume_stop
4830 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4831 {
4832 threads_debug_printf (" client wants LWP to remain %ld stopped",
4833 lwpid_of (thread));
4834 return;
4835 }
4836
4837 if (lwp->status_pending_p)
4838 {
4839 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4840 lwpid_of (thread));
4841 return;
4842 }
4843
4844 gdb_assert (lwp->suspended >= 0);
4845
4846 if (lwp->suspended)
4847 {
4848 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4849 return;
4850 }
4851
4852 if (thread->last_resume_kind == resume_stop
4853 && lwp->pending_signals_to_report.empty ()
4854 && (lwp->collecting_fast_tracepoint
4855 == fast_tpoint_collect_result::not_collecting))
4856 {
4857 /* We haven't reported this LWP as stopped yet (otherwise, the
4858 last_status.kind check above would catch it, and we wouldn't
4859 reach here. This LWP may have been momentarily paused by a
4860 stop_all_lwps call while handling for example, another LWP's
4861 step-over. In that case, the pending expected SIGSTOP signal
4862 that was queued at vCont;t handling time will have already
4863 been consumed by wait_for_sigstop, and so we need to requeue
4864 another one here. Note that if the LWP already has a SIGSTOP
4865 pending, this is a no-op. */
4866
4867 threads_debug_printf
4868 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4869 lwpid_of (thread));
4870
4871 send_sigstop (lwp);
4872 }
4873
4874 if (thread->last_resume_kind == resume_step)
4875 {
4876 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4877 lwpid_of (thread));
4878
4879 /* If resume_step is requested by GDB, install single-step
4880 breakpoints when the thread is about to be actually resumed if
4881 the single-step breakpoints weren't removed. */
4882 if (supports_software_single_step ()
4883 && !has_single_step_breakpoints (thread))
4884 install_software_single_step_breakpoints (lwp);
4885
4886 step = maybe_hw_step (thread);
4887 }
4888 else if (lwp->bp_reinsert != 0)
4889 {
4890 threads_debug_printf (" stepping LWP %ld, reinsert set",
4891 lwpid_of (thread));
4892
4893 step = maybe_hw_step (thread);
4894 }
4895 else
4896 step = 0;
4897
4898 resume_one_lwp (lwp, step, 0, NULL);
4899 }
4900
4901 void
4902 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4903 lwp_info *except)
4904 {
4905 struct lwp_info *lwp = get_thread_lwp (thread);
4906
4907 if (lwp == except)
4908 return;
4909
4910 lwp_suspended_decr (lwp);
4911
4912 proceed_one_lwp (thread, except);
4913 }
4914
4915 void
4916 linux_process_target::proceed_all_lwps ()
4917 {
4918 struct thread_info *need_step_over;
4919
4920 /* If there is a thread which would otherwise be resumed, which is
4921 stopped at a breakpoint that needs stepping over, then don't
4922 resume any threads - have it step over the breakpoint with all
4923 other threads stopped, then resume all threads again. */
4924
4925 if (low_supports_breakpoints ())
4926 {
4927 need_step_over = find_thread ([this] (thread_info *thread)
4928 {
4929 return thread_needs_step_over (thread);
4930 });
4931
4932 if (need_step_over != NULL)
4933 {
4934 threads_debug_printf ("found thread %ld needing a step-over",
4935 lwpid_of (need_step_over));
4936
4937 start_step_over (get_thread_lwp (need_step_over));
4938 return;
4939 }
4940 }
4941
4942 threads_debug_printf ("Proceeding, no step-over needed");
4943
4944 for_each_thread ([this] (thread_info *thread)
4945 {
4946 proceed_one_lwp (thread, NULL);
4947 });
4948 }
4949
4950 void
4951 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4952 {
4953 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4954
4955 if (except)
4956 threads_debug_printf ("except=(LWP %ld)",
4957 lwpid_of (get_lwp_thread (except)));
4958 else
4959 threads_debug_printf ("except=nullptr");
4960
4961 if (unsuspend)
4962 for_each_thread ([&] (thread_info *thread)
4963 {
4964 unsuspend_and_proceed_one_lwp (thread, except);
4965 });
4966 else
4967 for_each_thread ([&] (thread_info *thread)
4968 {
4969 proceed_one_lwp (thread, except);
4970 });
4971 }
4972
4973
4974 #ifdef HAVE_LINUX_REGSETS
4975
4976 #define use_linux_regsets 1
4977
4978 /* Returns true if REGSET has been disabled. */
4979
4980 static int
4981 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4982 {
4983 return (info->disabled_regsets != NULL
4984 && info->disabled_regsets[regset - info->regsets]);
4985 }
4986
4987 /* Disable REGSET. */
4988
4989 static void
4990 disable_regset (struct regsets_info *info, struct regset_info *regset)
4991 {
4992 int dr_offset;
4993
4994 dr_offset = regset - info->regsets;
4995 if (info->disabled_regsets == NULL)
4996 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4997 info->disabled_regsets[dr_offset] = 1;
4998 }
4999
5000 static int
5001 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5002 struct regcache *regcache)
5003 {
5004 struct regset_info *regset;
5005 int saw_general_regs = 0;
5006 int pid;
5007 struct iovec iov;
5008
5009 pid = lwpid_of (current_thread);
5010 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5011 {
5012 void *buf, *data;
5013 int nt_type, res;
5014
5015 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5016 continue;
5017
5018 buf = xmalloc (regset->size);
5019
5020 nt_type = regset->nt_type;
5021 if (nt_type)
5022 {
5023 iov.iov_base = buf;
5024 iov.iov_len = regset->size;
5025 data = (void *) &iov;
5026 }
5027 else
5028 data = buf;
5029
5030 #ifndef __sparc__
5031 res = ptrace (regset->get_request, pid,
5032 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5033 #else
5034 res = ptrace (regset->get_request, pid, data, nt_type);
5035 #endif
5036 if (res < 0)
5037 {
5038 if (errno == EIO
5039 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5040 {
5041 /* If we get EIO on a regset, or an EINVAL and the regset is
5042 optional, do not try it again for this process mode. */
5043 disable_regset (regsets_info, regset);
5044 }
5045 else if (errno == ENODATA)
5046 {
5047 /* ENODATA may be returned if the regset is currently
5048 not "active". This can happen in normal operation,
5049 so suppress the warning in this case. */
5050 }
5051 else if (errno == ESRCH)
5052 {
5053 /* At this point, ESRCH should mean the process is
5054 already gone, in which case we simply ignore attempts
5055 to read its registers. */
5056 }
5057 else
5058 {
5059 char s[256];
5060 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5061 pid);
5062 perror (s);
5063 }
5064 }
5065 else
5066 {
5067 if (regset->type == GENERAL_REGS)
5068 saw_general_regs = 1;
5069 regset->store_function (regcache, buf);
5070 }
5071 free (buf);
5072 }
5073 if (saw_general_regs)
5074 return 0;
5075 else
5076 return 1;
5077 }
5078
5079 static int
5080 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5081 struct regcache *regcache)
5082 {
5083 struct regset_info *regset;
5084 int saw_general_regs = 0;
5085 int pid;
5086 struct iovec iov;
5087
5088 pid = lwpid_of (current_thread);
5089 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5090 {
5091 void *buf, *data;
5092 int nt_type, res;
5093
5094 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5095 || regset->fill_function == NULL)
5096 continue;
5097
5098 buf = xmalloc (regset->size);
5099
5100 /* First fill the buffer with the current register set contents,
5101 in case there are any items in the kernel's regset that are
5102 not in gdbserver's regcache. */
5103
5104 nt_type = regset->nt_type;
5105 if (nt_type)
5106 {
5107 iov.iov_base = buf;
5108 iov.iov_len = regset->size;
5109 data = (void *) &iov;
5110 }
5111 else
5112 data = buf;
5113
5114 #ifndef __sparc__
5115 res = ptrace (regset->get_request, pid,
5116 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5117 #else
5118 res = ptrace (regset->get_request, pid, data, nt_type);
5119 #endif
5120
5121 if (res == 0)
5122 {
5123 /* Then overlay our cached registers on that. */
5124 regset->fill_function (regcache, buf);
5125
5126 /* Only now do we write the register set. */
5127 #ifndef __sparc__
5128 res = ptrace (regset->set_request, pid,
5129 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5130 #else
5131 res = ptrace (regset->set_request, pid, data, nt_type);
5132 #endif
5133 }
5134
5135 if (res < 0)
5136 {
5137 if (errno == EIO
5138 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5139 {
5140 /* If we get EIO on a regset, or an EINVAL and the regset is
5141 optional, do not try it again for this process mode. */
5142 disable_regset (regsets_info, regset);
5143 }
5144 else if (errno == ESRCH)
5145 {
5146 /* At this point, ESRCH should mean the process is
5147 already gone, in which case we simply ignore attempts
5148 to change its registers. See also the related
5149 comment in resume_one_lwp. */
5150 free (buf);
5151 return 0;
5152 }
5153 else
5154 {
5155 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5156 }
5157 }
5158 else if (regset->type == GENERAL_REGS)
5159 saw_general_regs = 1;
5160 free (buf);
5161 }
5162 if (saw_general_regs)
5163 return 0;
5164 else
5165 return 1;
5166 }
5167
5168 #else /* !HAVE_LINUX_REGSETS */
5169
5170 #define use_linux_regsets 0
5171 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5172 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5173
5174 #endif
5175
5176 /* Return 1 if register REGNO is supported by one of the regset ptrace
5177 calls or 0 if it has to be transferred individually. */
5178
5179 static int
5180 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5181 {
5182 unsigned char mask = 1 << (regno % 8);
5183 size_t index = regno / 8;
5184
5185 return (use_linux_regsets
5186 && (regs_info->regset_bitmap == NULL
5187 || (regs_info->regset_bitmap[index] & mask) != 0));
5188 }
5189
5190 #ifdef HAVE_LINUX_USRREGS
5191
5192 static int
5193 register_addr (const struct usrregs_info *usrregs, int regnum)
5194 {
5195 int addr;
5196
5197 if (regnum < 0 || regnum >= usrregs->num_regs)
5198 error ("Invalid register number %d.", regnum);
5199
5200 addr = usrregs->regmap[regnum];
5201
5202 return addr;
5203 }
5204
5205
5206 void
5207 linux_process_target::fetch_register (const usrregs_info *usrregs,
5208 regcache *regcache, int regno)
5209 {
5210 CORE_ADDR regaddr;
5211 int i, size;
5212 char *buf;
5213 int pid;
5214
5215 if (regno >= usrregs->num_regs)
5216 return;
5217 if (low_cannot_fetch_register (regno))
5218 return;
5219
5220 regaddr = register_addr (usrregs, regno);
5221 if (regaddr == -1)
5222 return;
5223
5224 size = ((register_size (regcache->tdesc, regno)
5225 + sizeof (PTRACE_XFER_TYPE) - 1)
5226 & -sizeof (PTRACE_XFER_TYPE));
5227 buf = (char *) alloca (size);
5228
5229 pid = lwpid_of (current_thread);
5230 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5231 {
5232 errno = 0;
5233 *(PTRACE_XFER_TYPE *) (buf + i) =
5234 ptrace (PTRACE_PEEKUSER, pid,
5235 /* Coerce to a uintptr_t first to avoid potential gcc warning
5236 of coercing an 8 byte integer to a 4 byte pointer. */
5237 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5238 regaddr += sizeof (PTRACE_XFER_TYPE);
5239 if (errno != 0)
5240 {
5241 /* Mark register REGNO unavailable. */
5242 supply_register (regcache, regno, NULL);
5243 return;
5244 }
5245 }
5246
5247 low_supply_ptrace_register (regcache, regno, buf);
5248 }
5249
5250 void
5251 linux_process_target::store_register (const usrregs_info *usrregs,
5252 regcache *regcache, int regno)
5253 {
5254 CORE_ADDR regaddr;
5255 int i, size;
5256 char *buf;
5257 int pid;
5258
5259 if (regno >= usrregs->num_regs)
5260 return;
5261 if (low_cannot_store_register (regno))
5262 return;
5263
5264 regaddr = register_addr (usrregs, regno);
5265 if (regaddr == -1)
5266 return;
5267
5268 size = ((register_size (regcache->tdesc, regno)
5269 + sizeof (PTRACE_XFER_TYPE) - 1)
5270 & -sizeof (PTRACE_XFER_TYPE));
5271 buf = (char *) alloca (size);
5272 memset (buf, 0, size);
5273
5274 low_collect_ptrace_register (regcache, regno, buf);
5275
5276 pid = lwpid_of (current_thread);
5277 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5278 {
5279 errno = 0;
5280 ptrace (PTRACE_POKEUSER, pid,
5281 /* Coerce to a uintptr_t first to avoid potential gcc warning
5282 about coercing an 8 byte integer to a 4 byte pointer. */
5283 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5284 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5285 if (errno != 0)
5286 {
5287 /* At this point, ESRCH should mean the process is
5288 already gone, in which case we simply ignore attempts
5289 to change its registers. See also the related
5290 comment in resume_one_lwp. */
5291 if (errno == ESRCH)
5292 return;
5293
5294
5295 if (!low_cannot_store_register (regno))
5296 error ("writing register %d: %s", regno, safe_strerror (errno));
5297 }
5298 regaddr += sizeof (PTRACE_XFER_TYPE);
5299 }
5300 }
5301 #endif /* HAVE_LINUX_USRREGS */
5302
5303 void
5304 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5305 int regno, char *buf)
5306 {
5307 collect_register (regcache, regno, buf);
5308 }
5309
5310 void
5311 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5312 int regno, const char *buf)
5313 {
5314 supply_register (regcache, regno, buf);
5315 }
5316
5317 void
5318 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5319 regcache *regcache,
5320 int regno, int all)
5321 {
5322 #ifdef HAVE_LINUX_USRREGS
5323 struct usrregs_info *usr = regs_info->usrregs;
5324
5325 if (regno == -1)
5326 {
5327 for (regno = 0; regno < usr->num_regs; regno++)
5328 if (all || !linux_register_in_regsets (regs_info, regno))
5329 fetch_register (usr, regcache, regno);
5330 }
5331 else
5332 fetch_register (usr, regcache, regno);
5333 #endif
5334 }
5335
5336 void
5337 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5338 regcache *regcache,
5339 int regno, int all)
5340 {
5341 #ifdef HAVE_LINUX_USRREGS
5342 struct usrregs_info *usr = regs_info->usrregs;
5343
5344 if (regno == -1)
5345 {
5346 for (regno = 0; regno < usr->num_regs; regno++)
5347 if (all || !linux_register_in_regsets (regs_info, regno))
5348 store_register (usr, regcache, regno);
5349 }
5350 else
5351 store_register (usr, regcache, regno);
5352 #endif
5353 }
5354
5355 void
5356 linux_process_target::fetch_registers (regcache *regcache, int regno)
5357 {
5358 int use_regsets;
5359 int all = 0;
5360 const regs_info *regs_info = get_regs_info ();
5361
5362 if (regno == -1)
5363 {
5364 if (regs_info->usrregs != NULL)
5365 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5366 low_fetch_register (regcache, regno);
5367
5368 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5369 if (regs_info->usrregs != NULL)
5370 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5371 }
5372 else
5373 {
5374 if (low_fetch_register (regcache, regno))
5375 return;
5376
5377 use_regsets = linux_register_in_regsets (regs_info, regno);
5378 if (use_regsets)
5379 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5380 regcache);
5381 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5382 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5383 }
5384 }
5385
5386 void
5387 linux_process_target::store_registers (regcache *regcache, int regno)
5388 {
5389 int use_regsets;
5390 int all = 0;
5391 const regs_info *regs_info = get_regs_info ();
5392
5393 if (regno == -1)
5394 {
5395 all = regsets_store_inferior_registers (regs_info->regsets_info,
5396 regcache);
5397 if (regs_info->usrregs != NULL)
5398 usr_store_inferior_registers (regs_info, regcache, regno, all);
5399 }
5400 else
5401 {
5402 use_regsets = linux_register_in_regsets (regs_info, regno);
5403 if (use_regsets)
5404 all = regsets_store_inferior_registers (regs_info->regsets_info,
5405 regcache);
5406 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5407 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5408 }
5409 }
5410
5411 bool
5412 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5413 {
5414 return false;
5415 }
5416
5417 /* A wrapper for the read_memory target op. */
5418
5419 static int
5420 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5421 {
5422 return the_target->read_memory (memaddr, myaddr, len);
5423 }
5424
5425
5426 /* Helper for read_memory/write_memory using /proc/PID/mem. Because
5427 we can use a single read/write call, this can be much more
5428 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
5429 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5430 One an only one of READBUF and WRITEBUF is non-null. If READBUF is
5431 not null, then we're reading, otherwise we're writing. */
5432
5433 static int
5434 proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5435 const gdb_byte *writebuf, int len)
5436 {
5437 gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5438
5439 process_info *proc = current_process ();
5440
5441 int fd = proc->priv->mem_fd;
5442 if (fd == -1)
5443 return EIO;
5444
5445 while (len > 0)
5446 {
5447 int bytes;
5448
5449 /* Use pread64/pwrite64 if available, since they save a syscall
5450 and can handle 64-bit offsets even on 32-bit platforms (for
5451 instance, SPARC debugging a SPARC64 application). But only
5452 use them if the offset isn't so high that when cast to off_t
5453 it'd be negative, as seen on SPARC64. pread64/pwrite64
5454 outright reject such offsets. lseek does not. */
5455 #ifdef HAVE_PREAD64
5456 if ((off_t) memaddr >= 0)
5457 bytes = (readbuf != nullptr
5458 ? pread64 (fd, readbuf, len, memaddr)
5459 : pwrite64 (fd, writebuf, len, memaddr));
5460 else
5461 #endif
5462 {
5463 bytes = -1;
5464 if (lseek (fd, memaddr, SEEK_SET) != -1)
5465 bytes = (readbuf != nullptr
5466 ? read (fd, readbuf, len)
5467 : write (fd, writebuf, len));
5468 }
5469
5470 if (bytes < 0)
5471 return errno;
5472 else if (bytes == 0)
5473 {
5474 /* EOF means the address space is gone, the whole process
5475 exited or execed. */
5476 return EIO;
5477 }
5478
5479 memaddr += bytes;
5480 if (readbuf != nullptr)
5481 readbuf += bytes;
5482 else
5483 writebuf += bytes;
5484 len -= bytes;
5485 }
5486
5487 return 0;
5488 }
5489
5490 int
5491 linux_process_target::read_memory (CORE_ADDR memaddr,
5492 unsigned char *myaddr, int len)
5493 {
5494 return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5495 }
5496
5497 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5498 memory at MEMADDR. On failure (cannot write to the inferior)
5499 returns the value of errno. Always succeeds if LEN is zero. */
5500
5501 int
5502 linux_process_target::write_memory (CORE_ADDR memaddr,
5503 const unsigned char *myaddr, int len)
5504 {
5505 if (debug_threads)
5506 {
5507 /* Dump up to four bytes. */
5508 char str[4 * 2 + 1];
5509 char *p = str;
5510 int dump = len < 4 ? len : 4;
5511
5512 for (int i = 0; i < dump; i++)
5513 {
5514 sprintf (p, "%02x", myaddr[i]);
5515 p += 2;
5516 }
5517 *p = '\0';
5518
5519 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5520 str, (long) memaddr, current_process ()->pid);
5521 }
5522
5523 return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5524 }
5525
5526 void
5527 linux_process_target::look_up_symbols ()
5528 {
5529 #ifdef USE_THREAD_DB
5530 struct process_info *proc = current_process ();
5531
5532 if (proc->priv->thread_db != NULL)
5533 return;
5534
5535 thread_db_init ();
5536 #endif
5537 }
5538
5539 void
5540 linux_process_target::request_interrupt ()
5541 {
5542 /* Send a SIGINT to the process group. This acts just like the user
5543 typed a ^C on the controlling terminal. */
5544 int res = ::kill (-signal_pid, SIGINT);
5545 if (res == -1)
5546 warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5547 signal_pid, safe_strerror (errno));
5548 }
5549
5550 bool
5551 linux_process_target::supports_read_auxv ()
5552 {
5553 return true;
5554 }
5555
5556 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5557 to debugger memory starting at MYADDR. */
5558
5559 int
5560 linux_process_target::read_auxv (int pid, CORE_ADDR offset,
5561 unsigned char *myaddr, unsigned int len)
5562 {
5563 char filename[PATH_MAX];
5564 int fd, n;
5565
5566 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5567
5568 fd = open (filename, O_RDONLY);
5569 if (fd < 0)
5570 return -1;
5571
5572 if (offset != (CORE_ADDR) 0
5573 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5574 n = -1;
5575 else
5576 n = read (fd, myaddr, len);
5577
5578 close (fd);
5579
5580 return n;
5581 }
5582
5583 int
5584 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5585 int size, raw_breakpoint *bp)
5586 {
5587 if (type == raw_bkpt_type_sw)
5588 return insert_memory_breakpoint (bp);
5589 else
5590 return low_insert_point (type, addr, size, bp);
5591 }
5592
5593 int
5594 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5595 int size, raw_breakpoint *bp)
5596 {
5597 /* Unsupported (see target.h). */
5598 return 1;
5599 }
5600
5601 int
5602 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5603 int size, raw_breakpoint *bp)
5604 {
5605 if (type == raw_bkpt_type_sw)
5606 return remove_memory_breakpoint (bp);
5607 else
5608 return low_remove_point (type, addr, size, bp);
5609 }
5610
5611 int
5612 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5613 int size, raw_breakpoint *bp)
5614 {
5615 /* Unsupported (see target.h). */
5616 return 1;
5617 }
5618
5619 /* Implement the stopped_by_sw_breakpoint target_ops
5620 method. */
5621
5622 bool
5623 linux_process_target::stopped_by_sw_breakpoint ()
5624 {
5625 struct lwp_info *lwp = get_thread_lwp (current_thread);
5626
5627 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5628 }
5629
5630 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5631 method. */
5632
5633 bool
5634 linux_process_target::supports_stopped_by_sw_breakpoint ()
5635 {
5636 return USE_SIGTRAP_SIGINFO;
5637 }
5638
5639 /* Implement the stopped_by_hw_breakpoint target_ops
5640 method. */
5641
5642 bool
5643 linux_process_target::stopped_by_hw_breakpoint ()
5644 {
5645 struct lwp_info *lwp = get_thread_lwp (current_thread);
5646
5647 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5648 }
5649
5650 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5651 method. */
5652
5653 bool
5654 linux_process_target::supports_stopped_by_hw_breakpoint ()
5655 {
5656 return USE_SIGTRAP_SIGINFO;
5657 }
5658
5659 /* Implement the supports_hardware_single_step target_ops method. */
5660
5661 bool
5662 linux_process_target::supports_hardware_single_step ()
5663 {
5664 return true;
5665 }
5666
5667 bool
5668 linux_process_target::stopped_by_watchpoint ()
5669 {
5670 struct lwp_info *lwp = get_thread_lwp (current_thread);
5671
5672 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5673 }
5674
5675 CORE_ADDR
5676 linux_process_target::stopped_data_address ()
5677 {
5678 struct lwp_info *lwp = get_thread_lwp (current_thread);
5679
5680 return lwp->stopped_data_address;
5681 }
5682
5683 /* This is only used for targets that define PT_TEXT_ADDR,
5684 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5685 the target has different ways of acquiring this information, like
5686 loadmaps. */
5687
5688 bool
5689 linux_process_target::supports_read_offsets ()
5690 {
5691 #ifdef SUPPORTS_READ_OFFSETS
5692 return true;
5693 #else
5694 return false;
5695 #endif
5696 }
5697
5698 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5699 to tell gdb about. */
5700
5701 int
5702 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5703 {
5704 #ifdef SUPPORTS_READ_OFFSETS
5705 unsigned long text, text_end, data;
5706 int pid = lwpid_of (current_thread);
5707
5708 errno = 0;
5709
5710 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5711 (PTRACE_TYPE_ARG4) 0);
5712 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5713 (PTRACE_TYPE_ARG4) 0);
5714 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5715 (PTRACE_TYPE_ARG4) 0);
5716
5717 if (errno == 0)
5718 {
5719 /* Both text and data offsets produced at compile-time (and so
5720 used by gdb) are relative to the beginning of the program,
5721 with the data segment immediately following the text segment.
5722 However, the actual runtime layout in memory may put the data
5723 somewhere else, so when we send gdb a data base-address, we
5724 use the real data base address and subtract the compile-time
5725 data base-address from it (which is just the length of the
5726 text segment). BSS immediately follows data in both
5727 cases. */
5728 *text_p = text;
5729 *data_p = data - (text_end - text);
5730
5731 return 1;
5732 }
5733 return 0;
5734 #else
5735 gdb_assert_not_reached ("target op read_offsets not supported");
5736 #endif
5737 }
5738
5739 bool
5740 linux_process_target::supports_get_tls_address ()
5741 {
5742 #ifdef USE_THREAD_DB
5743 return true;
5744 #else
5745 return false;
5746 #endif
5747 }
5748
5749 int
5750 linux_process_target::get_tls_address (thread_info *thread,
5751 CORE_ADDR offset,
5752 CORE_ADDR load_module,
5753 CORE_ADDR *address)
5754 {
5755 #ifdef USE_THREAD_DB
5756 return thread_db_get_tls_address (thread, offset, load_module, address);
5757 #else
5758 return -1;
5759 #endif
5760 }
5761
5762 bool
5763 linux_process_target::supports_qxfer_osdata ()
5764 {
5765 return true;
5766 }
5767
5768 int
5769 linux_process_target::qxfer_osdata (const char *annex,
5770 unsigned char *readbuf,
5771 unsigned const char *writebuf,
5772 CORE_ADDR offset, int len)
5773 {
5774 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5775 }
5776
5777 void
5778 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5779 gdb_byte *inf_siginfo, int direction)
5780 {
5781 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5782
5783 /* If there was no callback, or the callback didn't do anything,
5784 then just do a straight memcpy. */
5785 if (!done)
5786 {
5787 if (direction == 1)
5788 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5789 else
5790 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5791 }
5792 }
5793
5794 bool
5795 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5796 int direction)
5797 {
5798 return false;
5799 }
5800
5801 bool
5802 linux_process_target::supports_qxfer_siginfo ()
5803 {
5804 return true;
5805 }
5806
5807 int
5808 linux_process_target::qxfer_siginfo (const char *annex,
5809 unsigned char *readbuf,
5810 unsigned const char *writebuf,
5811 CORE_ADDR offset, int len)
5812 {
5813 int pid;
5814 siginfo_t siginfo;
5815 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5816
5817 if (current_thread == NULL)
5818 return -1;
5819
5820 pid = lwpid_of (current_thread);
5821
5822 threads_debug_printf ("%s siginfo for lwp %d.",
5823 readbuf != NULL ? "Reading" : "Writing",
5824 pid);
5825
5826 if (offset >= sizeof (siginfo))
5827 return -1;
5828
5829 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5830 return -1;
5831
5832 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5833 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5834 inferior with a 64-bit GDBSERVER should look the same as debugging it
5835 with a 32-bit GDBSERVER, we need to convert it. */
5836 siginfo_fixup (&siginfo, inf_siginfo, 0);
5837
5838 if (offset + len > sizeof (siginfo))
5839 len = sizeof (siginfo) - offset;
5840
5841 if (readbuf != NULL)
5842 memcpy (readbuf, inf_siginfo + offset, len);
5843 else
5844 {
5845 memcpy (inf_siginfo + offset, writebuf, len);
5846
5847 /* Convert back to ptrace layout before flushing it out. */
5848 siginfo_fixup (&siginfo, inf_siginfo, 1);
5849
5850 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5851 return -1;
5852 }
5853
5854 return len;
5855 }
5856
5857 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5858 so we notice when children change state; as the handler for the
5859 sigsuspend in my_waitpid. */
5860
5861 static void
5862 sigchld_handler (int signo)
5863 {
5864 int old_errno = errno;
5865
5866 if (debug_threads)
5867 {
5868 do
5869 {
5870 /* Use the async signal safe debug function. */
5871 if (debug_write ("sigchld_handler\n",
5872 sizeof ("sigchld_handler\n") - 1) < 0)
5873 break; /* just ignore */
5874 } while (0);
5875 }
5876
5877 if (target_is_async_p ())
5878 async_file_mark (); /* trigger a linux_wait */
5879
5880 errno = old_errno;
5881 }
5882
5883 bool
5884 linux_process_target::supports_non_stop ()
5885 {
5886 return true;
5887 }
5888
5889 bool
5890 linux_process_target::async (bool enable)
5891 {
5892 bool previous = target_is_async_p ();
5893
5894 threads_debug_printf ("async (%d), previous=%d",
5895 enable, previous);
5896
5897 if (previous != enable)
5898 {
5899 sigset_t mask;
5900 sigemptyset (&mask);
5901 sigaddset (&mask, SIGCHLD);
5902
5903 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5904
5905 if (enable)
5906 {
5907 if (!linux_event_pipe.open_pipe ())
5908 {
5909 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5910
5911 warning ("creating event pipe failed.");
5912 return previous;
5913 }
5914
5915 /* Register the event loop handler. */
5916 add_file_handler (linux_event_pipe.event_fd (),
5917 handle_target_event, NULL,
5918 "linux-low");
5919
5920 /* Always trigger a linux_wait. */
5921 async_file_mark ();
5922 }
5923 else
5924 {
5925 delete_file_handler (linux_event_pipe.event_fd ());
5926
5927 linux_event_pipe.close_pipe ();
5928 }
5929
5930 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5931 }
5932
5933 return previous;
5934 }
5935
5936 int
5937 linux_process_target::start_non_stop (bool nonstop)
5938 {
5939 /* Register or unregister from event-loop accordingly. */
5940 target_async (nonstop);
5941
5942 if (target_is_async_p () != (nonstop != false))
5943 return -1;
5944
5945 return 0;
5946 }
5947
5948 bool
5949 linux_process_target::supports_multi_process ()
5950 {
5951 return true;
5952 }
5953
5954 /* Check if fork events are supported. */
5955
5956 bool
5957 linux_process_target::supports_fork_events ()
5958 {
5959 return true;
5960 }
5961
5962 /* Check if vfork events are supported. */
5963
5964 bool
5965 linux_process_target::supports_vfork_events ()
5966 {
5967 return true;
5968 }
5969
5970 /* Return the set of supported thread options. */
5971
5972 gdb_thread_options
5973 linux_process_target::supported_thread_options ()
5974 {
5975 return GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
5976 }
5977
5978 /* Check if exec events are supported. */
5979
5980 bool
5981 linux_process_target::supports_exec_events ()
5982 {
5983 return true;
5984 }
5985
5986 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5987 ptrace flags for all inferiors. This is in case the new GDB connection
5988 doesn't support the same set of events that the previous one did. */
5989
5990 void
5991 linux_process_target::handle_new_gdb_connection ()
5992 {
5993 /* Request that all the lwps reset their ptrace options. */
5994 for_each_thread ([] (thread_info *thread)
5995 {
5996 struct lwp_info *lwp = get_thread_lwp (thread);
5997
5998 if (!lwp->stopped)
5999 {
6000 /* Stop the lwp so we can modify its ptrace options. */
6001 lwp->must_set_ptrace_flags = 1;
6002 linux_stop_lwp (lwp);
6003 }
6004 else
6005 {
6006 /* Already stopped; go ahead and set the ptrace options. */
6007 struct process_info *proc = find_process_pid (pid_of (thread));
6008 int options = linux_low_ptrace_options (proc->attached);
6009
6010 linux_enable_event_reporting (lwpid_of (thread), options);
6011 lwp->must_set_ptrace_flags = 0;
6012 }
6013 });
6014 }
6015
6016 int
6017 linux_process_target::handle_monitor_command (char *mon)
6018 {
6019 #ifdef USE_THREAD_DB
6020 return thread_db_handle_monitor_command (mon);
6021 #else
6022 return 0;
6023 #endif
6024 }
6025
6026 int
6027 linux_process_target::core_of_thread (ptid_t ptid)
6028 {
6029 return linux_common_core_of_thread (ptid);
6030 }
6031
6032 bool
6033 linux_process_target::supports_disable_randomization ()
6034 {
6035 return true;
6036 }
6037
6038 bool
6039 linux_process_target::supports_agent ()
6040 {
6041 return true;
6042 }
6043
6044 bool
6045 linux_process_target::supports_range_stepping ()
6046 {
6047 if (supports_software_single_step ())
6048 return true;
6049
6050 return low_supports_range_stepping ();
6051 }
6052
6053 bool
6054 linux_process_target::low_supports_range_stepping ()
6055 {
6056 return false;
6057 }
6058
6059 bool
6060 linux_process_target::supports_pid_to_exec_file ()
6061 {
6062 return true;
6063 }
6064
6065 const char *
6066 linux_process_target::pid_to_exec_file (int pid)
6067 {
6068 return linux_proc_pid_to_exec_file (pid);
6069 }
6070
6071 bool
6072 linux_process_target::supports_multifs ()
6073 {
6074 return true;
6075 }
6076
6077 int
6078 linux_process_target::multifs_open (int pid, const char *filename,
6079 int flags, mode_t mode)
6080 {
6081 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6082 }
6083
6084 int
6085 linux_process_target::multifs_unlink (int pid, const char *filename)
6086 {
6087 return linux_mntns_unlink (pid, filename);
6088 }
6089
6090 ssize_t
6091 linux_process_target::multifs_readlink (int pid, const char *filename,
6092 char *buf, size_t bufsiz)
6093 {
6094 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6095 }
6096
6097 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6098 struct target_loadseg
6099 {
6100 /* Core address to which the segment is mapped. */
6101 Elf32_Addr addr;
6102 /* VMA recorded in the program header. */
6103 Elf32_Addr p_vaddr;
6104 /* Size of this segment in memory. */
6105 Elf32_Word p_memsz;
6106 };
6107
6108 # if defined PT_GETDSBT
6109 struct target_loadmap
6110 {
6111 /* Protocol version number, must be zero. */
6112 Elf32_Word version;
6113 /* Pointer to the DSBT table, its size, and the DSBT index. */
6114 unsigned *dsbt_table;
6115 unsigned dsbt_size, dsbt_index;
6116 /* Number of segments in this map. */
6117 Elf32_Word nsegs;
6118 /* The actual memory map. */
6119 struct target_loadseg segs[/*nsegs*/];
6120 };
6121 # define LINUX_LOADMAP PT_GETDSBT
6122 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6123 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6124 # else
6125 struct target_loadmap
6126 {
6127 /* Protocol version number, must be zero. */
6128 Elf32_Half version;
6129 /* Number of segments in this map. */
6130 Elf32_Half nsegs;
6131 /* The actual memory map. */
6132 struct target_loadseg segs[/*nsegs*/];
6133 };
6134 # define LINUX_LOADMAP PTRACE_GETFDPIC
6135 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6136 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6137 # endif
6138
6139 bool
6140 linux_process_target::supports_read_loadmap ()
6141 {
6142 return true;
6143 }
6144
6145 int
6146 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6147 unsigned char *myaddr, unsigned int len)
6148 {
6149 int pid = lwpid_of (current_thread);
6150 int addr = -1;
6151 struct target_loadmap *data = NULL;
6152 unsigned int actual_length, copy_length;
6153
6154 if (strcmp (annex, "exec") == 0)
6155 addr = (int) LINUX_LOADMAP_EXEC;
6156 else if (strcmp (annex, "interp") == 0)
6157 addr = (int) LINUX_LOADMAP_INTERP;
6158 else
6159 return -1;
6160
6161 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6162 return -1;
6163
6164 if (data == NULL)
6165 return -1;
6166
6167 actual_length = sizeof (struct target_loadmap)
6168 + sizeof (struct target_loadseg) * data->nsegs;
6169
6170 if (offset < 0 || offset > actual_length)
6171 return -1;
6172
6173 copy_length = actual_length - offset < len ? actual_length - offset : len;
6174 memcpy (myaddr, (char *) data + offset, copy_length);
6175 return copy_length;
6176 }
6177 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6178
6179 bool
6180 linux_process_target::supports_catch_syscall ()
6181 {
6182 return low_supports_catch_syscall ();
6183 }
6184
6185 bool
6186 linux_process_target::low_supports_catch_syscall ()
6187 {
6188 return false;
6189 }
6190
6191 CORE_ADDR
6192 linux_process_target::read_pc (regcache *regcache)
6193 {
6194 if (!low_supports_breakpoints ())
6195 return 0;
6196
6197 return low_get_pc (regcache);
6198 }
6199
6200 void
6201 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6202 {
6203 gdb_assert (low_supports_breakpoints ());
6204
6205 low_set_pc (regcache, pc);
6206 }
6207
6208 bool
6209 linux_process_target::supports_thread_stopped ()
6210 {
6211 return true;
6212 }
6213
6214 bool
6215 linux_process_target::thread_stopped (thread_info *thread)
6216 {
6217 return get_thread_lwp (thread)->stopped;
6218 }
6219
6220 bool
6221 linux_process_target::any_resumed ()
6222 {
6223 bool any_resumed;
6224
6225 auto status_pending_p_any = [&] (thread_info *thread)
6226 {
6227 return status_pending_p_callback (thread, minus_one_ptid);
6228 };
6229
6230 auto not_stopped = [&] (thread_info *thread)
6231 {
6232 return not_stopped_callback (thread, minus_one_ptid);
6233 };
6234
6235 /* Find a resumed LWP, if any. */
6236 if (find_thread (status_pending_p_any) != NULL)
6237 any_resumed = 1;
6238 else if (find_thread (not_stopped) != NULL)
6239 any_resumed = 1;
6240 else
6241 any_resumed = 0;
6242
6243 return any_resumed;
6244 }
6245
6246 /* This exposes stop-all-threads functionality to other modules. */
6247
6248 void
6249 linux_process_target::pause_all (bool freeze)
6250 {
6251 stop_all_lwps (freeze, NULL);
6252 }
6253
6254 /* This exposes unstop-all-threads functionality to other gdbserver
6255 modules. */
6256
6257 void
6258 linux_process_target::unpause_all (bool unfreeze)
6259 {
6260 unstop_all_lwps (unfreeze, NULL);
6261 }
6262
6263 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6264
6265 static int
6266 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6267 CORE_ADDR *phdr_memaddr, int *num_phdr)
6268 {
6269 char filename[PATH_MAX];
6270 int fd;
6271 const int auxv_size = is_elf64
6272 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6273 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6274
6275 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6276
6277 fd = open (filename, O_RDONLY);
6278 if (fd < 0)
6279 return 1;
6280
6281 *phdr_memaddr = 0;
6282 *num_phdr = 0;
6283 while (read (fd, buf, auxv_size) == auxv_size
6284 && (*phdr_memaddr == 0 || *num_phdr == 0))
6285 {
6286 if (is_elf64)
6287 {
6288 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6289
6290 switch (aux->a_type)
6291 {
6292 case AT_PHDR:
6293 *phdr_memaddr = aux->a_un.a_val;
6294 break;
6295 case AT_PHNUM:
6296 *num_phdr = aux->a_un.a_val;
6297 break;
6298 }
6299 }
6300 else
6301 {
6302 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6303
6304 switch (aux->a_type)
6305 {
6306 case AT_PHDR:
6307 *phdr_memaddr = aux->a_un.a_val;
6308 break;
6309 case AT_PHNUM:
6310 *num_phdr = aux->a_un.a_val;
6311 break;
6312 }
6313 }
6314 }
6315
6316 close (fd);
6317
6318 if (*phdr_memaddr == 0 || *num_phdr == 0)
6319 {
6320 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6321 "phdr_memaddr = %ld, phdr_num = %d",
6322 (long) *phdr_memaddr, *num_phdr);
6323 return 2;
6324 }
6325
6326 return 0;
6327 }
6328
6329 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6330
6331 static CORE_ADDR
6332 get_dynamic (const int pid, const int is_elf64)
6333 {
6334 CORE_ADDR phdr_memaddr, relocation;
6335 int num_phdr, i;
6336 unsigned char *phdr_buf;
6337 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6338
6339 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6340 return 0;
6341
6342 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6343 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6344
6345 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6346 return 0;
6347
6348 /* Compute relocation: it is expected to be 0 for "regular" executables,
6349 non-zero for PIE ones. */
6350 relocation = -1;
6351 for (i = 0; relocation == -1 && i < num_phdr; i++)
6352 if (is_elf64)
6353 {
6354 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6355
6356 if (p->p_type == PT_PHDR)
6357 relocation = phdr_memaddr - p->p_vaddr;
6358 }
6359 else
6360 {
6361 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6362
6363 if (p->p_type == PT_PHDR)
6364 relocation = phdr_memaddr - p->p_vaddr;
6365 }
6366
6367 if (relocation == -1)
6368 {
6369 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6370 any real world executables, including PIE executables, have always
6371 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6372 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6373 or present DT_DEBUG anyway (fpc binaries are statically linked).
6374
6375 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6376
6377 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6378
6379 return 0;
6380 }
6381
6382 for (i = 0; i < num_phdr; i++)
6383 {
6384 if (is_elf64)
6385 {
6386 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6387
6388 if (p->p_type == PT_DYNAMIC)
6389 return p->p_vaddr + relocation;
6390 }
6391 else
6392 {
6393 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6394
6395 if (p->p_type == PT_DYNAMIC)
6396 return p->p_vaddr + relocation;
6397 }
6398 }
6399
6400 return 0;
6401 }
6402
6403 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6404 can be 0 if the inferior does not yet have the library list initialized.
6405 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6406 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6407
6408 static CORE_ADDR
6409 get_r_debug (const int pid, const int is_elf64)
6410 {
6411 CORE_ADDR dynamic_memaddr;
6412 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6413 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6414 CORE_ADDR map = -1;
6415
6416 dynamic_memaddr = get_dynamic (pid, is_elf64);
6417 if (dynamic_memaddr == 0)
6418 return map;
6419
6420 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6421 {
6422 if (is_elf64)
6423 {
6424 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6425 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6426 union
6427 {
6428 Elf64_Xword map;
6429 unsigned char buf[sizeof (Elf64_Xword)];
6430 }
6431 rld_map;
6432 #endif
6433 #ifdef DT_MIPS_RLD_MAP
6434 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6435 {
6436 if (linux_read_memory (dyn->d_un.d_val,
6437 rld_map.buf, sizeof (rld_map.buf)) == 0)
6438 return rld_map.map;
6439 else
6440 break;
6441 }
6442 #endif /* DT_MIPS_RLD_MAP */
6443 #ifdef DT_MIPS_RLD_MAP_REL
6444 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6445 {
6446 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6447 rld_map.buf, sizeof (rld_map.buf)) == 0)
6448 return rld_map.map;
6449 else
6450 break;
6451 }
6452 #endif /* DT_MIPS_RLD_MAP_REL */
6453
6454 if (dyn->d_tag == DT_DEBUG && map == -1)
6455 map = dyn->d_un.d_val;
6456
6457 if (dyn->d_tag == DT_NULL)
6458 break;
6459 }
6460 else
6461 {
6462 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6463 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6464 union
6465 {
6466 Elf32_Word map;
6467 unsigned char buf[sizeof (Elf32_Word)];
6468 }
6469 rld_map;
6470 #endif
6471 #ifdef DT_MIPS_RLD_MAP
6472 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6473 {
6474 if (linux_read_memory (dyn->d_un.d_val,
6475 rld_map.buf, sizeof (rld_map.buf)) == 0)
6476 return rld_map.map;
6477 else
6478 break;
6479 }
6480 #endif /* DT_MIPS_RLD_MAP */
6481 #ifdef DT_MIPS_RLD_MAP_REL
6482 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6483 {
6484 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6485 rld_map.buf, sizeof (rld_map.buf)) == 0)
6486 return rld_map.map;
6487 else
6488 break;
6489 }
6490 #endif /* DT_MIPS_RLD_MAP_REL */
6491
6492 if (dyn->d_tag == DT_DEBUG && map == -1)
6493 map = dyn->d_un.d_val;
6494
6495 if (dyn->d_tag == DT_NULL)
6496 break;
6497 }
6498
6499 dynamic_memaddr += dyn_size;
6500 }
6501
6502 return map;
6503 }
6504
6505 /* Read one pointer from MEMADDR in the inferior. */
6506
6507 static int
6508 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6509 {
6510 int ret;
6511
6512 /* Go through a union so this works on either big or little endian
6513 hosts, when the inferior's pointer size is smaller than the size
6514 of CORE_ADDR. It is assumed the inferior's endianness is the
6515 same of the superior's. */
6516 union
6517 {
6518 CORE_ADDR core_addr;
6519 unsigned int ui;
6520 unsigned char uc;
6521 } addr;
6522
6523 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6524 if (ret == 0)
6525 {
6526 if (ptr_size == sizeof (CORE_ADDR))
6527 *ptr = addr.core_addr;
6528 else if (ptr_size == sizeof (unsigned int))
6529 *ptr = addr.ui;
6530 else
6531 gdb_assert_not_reached ("unhandled pointer size");
6532 }
6533 return ret;
6534 }
6535
6536 bool
6537 linux_process_target::supports_qxfer_libraries_svr4 ()
6538 {
6539 return true;
6540 }
6541
6542 struct link_map_offsets
6543 {
6544 /* Offset and size of r_debug.r_version. */
6545 int r_version_offset;
6546
6547 /* Offset and size of r_debug.r_map. */
6548 int r_map_offset;
6549
6550 /* Offset of r_debug_extended.r_next. */
6551 int r_next_offset;
6552
6553 /* Offset to l_addr field in struct link_map. */
6554 int l_addr_offset;
6555
6556 /* Offset to l_name field in struct link_map. */
6557 int l_name_offset;
6558
6559 /* Offset to l_ld field in struct link_map. */
6560 int l_ld_offset;
6561
6562 /* Offset to l_next field in struct link_map. */
6563 int l_next_offset;
6564
6565 /* Offset to l_prev field in struct link_map. */
6566 int l_prev_offset;
6567 };
6568
6569 static const link_map_offsets lmo_32bit_offsets =
6570 {
6571 0, /* r_version offset. */
6572 4, /* r_debug.r_map offset. */
6573 20, /* r_debug_extended.r_next. */
6574 0, /* l_addr offset in link_map. */
6575 4, /* l_name offset in link_map. */
6576 8, /* l_ld offset in link_map. */
6577 12, /* l_next offset in link_map. */
6578 16 /* l_prev offset in link_map. */
6579 };
6580
6581 static const link_map_offsets lmo_64bit_offsets =
6582 {
6583 0, /* r_version offset. */
6584 8, /* r_debug.r_map offset. */
6585 40, /* r_debug_extended.r_next. */
6586 0, /* l_addr offset in link_map. */
6587 8, /* l_name offset in link_map. */
6588 16, /* l_ld offset in link_map. */
6589 24, /* l_next offset in link_map. */
6590 32 /* l_prev offset in link_map. */
6591 };
6592
6593 /* Get the loaded shared libraries from one namespace. */
6594
6595 static void
6596 read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6597 CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6598 {
6599 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6600
6601 while (lm_addr
6602 && read_one_ptr (lm_addr + lmo->l_name_offset,
6603 &l_name, ptr_size) == 0
6604 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6605 &l_addr, ptr_size) == 0
6606 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6607 &l_ld, ptr_size) == 0
6608 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6609 &l_prev, ptr_size) == 0
6610 && read_one_ptr (lm_addr + lmo->l_next_offset,
6611 &l_next, ptr_size) == 0)
6612 {
6613 unsigned char libname[PATH_MAX];
6614
6615 if (lm_prev != l_prev)
6616 {
6617 warning ("Corrupted shared library list: 0x%s != 0x%s",
6618 paddress (lm_prev), paddress (l_prev));
6619 break;
6620 }
6621
6622 /* Not checking for error because reading may stop before we've got
6623 PATH_MAX worth of characters. */
6624 libname[0] = '\0';
6625 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6626 libname[sizeof (libname) - 1] = '\0';
6627 if (libname[0] != '\0')
6628 {
6629 string_appendf (document, "<library name=\"");
6630 xml_escape_text_append (document, (char *) libname);
6631 string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6632 "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6633 paddress (lm_addr), paddress (l_addr),
6634 paddress (l_ld), paddress (lmid));
6635 }
6636
6637 lm_prev = lm_addr;
6638 lm_addr = l_next;
6639 }
6640 }
6641
6642 /* Construct qXfer:libraries-svr4:read reply. */
6643
6644 int
6645 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6646 unsigned char *readbuf,
6647 unsigned const char *writebuf,
6648 CORE_ADDR offset, int len)
6649 {
6650 struct process_info_private *const priv = current_process ()->priv;
6651 char filename[PATH_MAX];
6652 int pid, is_elf64;
6653 unsigned int machine;
6654 CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6655
6656 if (writebuf != NULL)
6657 return -2;
6658 if (readbuf == NULL)
6659 return -1;
6660
6661 pid = lwpid_of (current_thread);
6662 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6663 is_elf64 = elf_64_file_p (filename, &machine);
6664 const link_map_offsets *lmo;
6665 int ptr_size;
6666 if (is_elf64)
6667 {
6668 lmo = &lmo_64bit_offsets;
6669 ptr_size = 8;
6670 }
6671 else
6672 {
6673 lmo = &lmo_32bit_offsets;
6674 ptr_size = 4;
6675 }
6676
6677 while (annex[0] != '\0')
6678 {
6679 const char *sep;
6680 CORE_ADDR *addrp;
6681 int name_len;
6682
6683 sep = strchr (annex, '=');
6684 if (sep == NULL)
6685 break;
6686
6687 name_len = sep - annex;
6688 if (name_len == 4 && startswith (annex, "lmid"))
6689 addrp = &lmid;
6690 else if (name_len == 5 && startswith (annex, "start"))
6691 addrp = &lm_addr;
6692 else if (name_len == 4 && startswith (annex, "prev"))
6693 addrp = &lm_prev;
6694 else
6695 {
6696 annex = strchr (sep, ';');
6697 if (annex == NULL)
6698 break;
6699 annex++;
6700 continue;
6701 }
6702
6703 annex = decode_address_to_semicolon (addrp, sep + 1);
6704 }
6705
6706 std::string document = "<library-list-svr4 version=\"1.0\"";
6707
6708 /* When the starting LM_ADDR is passed in the annex, only traverse that
6709 namespace, which is assumed to be identified by LMID.
6710
6711 Otherwise, start with R_DEBUG and traverse all namespaces we find. */
6712 if (lm_addr != 0)
6713 {
6714 document += ">";
6715 read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6716 }
6717 else
6718 {
6719 if (lm_prev != 0)
6720 warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6721
6722 /* We could interpret LMID as 'provide only the libraries for this
6723 namespace' but GDB is currently only providing lmid, start, and
6724 prev, or nothing. */
6725 if (lmid != 0)
6726 warning ("ignoring lmid=0x%s without start", paddress (lmid));
6727
6728 CORE_ADDR r_debug = priv->r_debug;
6729 if (r_debug == 0)
6730 r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6731
6732 /* We failed to find DT_DEBUG. Such situation will not change
6733 for this inferior - do not retry it. Report it to GDB as
6734 E01, see for the reasons at the GDB solib-svr4.c side. */
6735 if (r_debug == (CORE_ADDR) -1)
6736 return -1;
6737
6738 /* Terminate the header if we end up with an empty list. */
6739 if (r_debug == 0)
6740 document += ">";
6741
6742 while (r_debug != 0)
6743 {
6744 int r_version = 0;
6745 if (linux_read_memory (r_debug + lmo->r_version_offset,
6746 (unsigned char *) &r_version,
6747 sizeof (r_version)) != 0)
6748 {
6749 warning ("unable to read r_version from 0x%s",
6750 paddress (r_debug + lmo->r_version_offset));
6751 break;
6752 }
6753
6754 if (r_version < 1)
6755 {
6756 warning ("unexpected r_debug version %d", r_version);
6757 break;
6758 }
6759
6760 if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6761 ptr_size) != 0)
6762 {
6763 warning ("unable to read r_map from 0x%s",
6764 paddress (r_debug + lmo->r_map_offset));
6765 break;
6766 }
6767
6768 /* We read the entire namespace. */
6769 lm_prev = 0;
6770
6771 /* The first entry corresponds to the main executable unless the
6772 dynamic loader was loaded late by a static executable. But
6773 in such case the main executable does not have PT_DYNAMIC
6774 present and we would not have gotten here. */
6775 if (r_debug == priv->r_debug)
6776 {
6777 if (lm_addr != 0)
6778 string_appendf (document, " main-lm=\"0x%s\">",
6779 paddress (lm_addr));
6780 else
6781 document += ">";
6782
6783 lm_prev = lm_addr;
6784 if (read_one_ptr (lm_addr + lmo->l_next_offset,
6785 &lm_addr, ptr_size) != 0)
6786 {
6787 warning ("unable to read l_next from 0x%s",
6788 paddress (lm_addr + lmo->l_next_offset));
6789 break;
6790 }
6791 }
6792
6793 read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6794
6795 if (r_version < 2)
6796 break;
6797
6798 if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6799 ptr_size) != 0)
6800 {
6801 warning ("unable to read r_next from 0x%s",
6802 paddress (r_debug + lmo->r_next_offset));
6803 break;
6804 }
6805 }
6806 }
6807
6808 document += "</library-list-svr4>";
6809
6810 int document_len = document.length ();
6811 if (offset < document_len)
6812 document_len -= offset;
6813 else
6814 document_len = 0;
6815 if (len > document_len)
6816 len = document_len;
6817
6818 memcpy (readbuf, document.data () + offset, len);
6819
6820 return len;
6821 }
6822
6823 #ifdef HAVE_LINUX_BTRACE
6824
6825 bool
6826 linux_process_target::supports_btrace ()
6827 {
6828 return true;
6829 }
6830
6831 btrace_target_info *
6832 linux_process_target::enable_btrace (thread_info *tp,
6833 const btrace_config *conf)
6834 {
6835 return linux_enable_btrace (tp->id, conf);
6836 }
6837
6838 /* See to_disable_btrace target method. */
6839
6840 int
6841 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6842 {
6843 enum btrace_error err;
6844
6845 err = linux_disable_btrace (tinfo);
6846 return (err == BTRACE_ERR_NONE ? 0 : -1);
6847 }
6848
6849 /* Encode an Intel Processor Trace configuration. */
6850
6851 static void
6852 linux_low_encode_pt_config (std::string *buffer,
6853 const struct btrace_data_pt_config *config)
6854 {
6855 *buffer += "<pt-config>\n";
6856
6857 switch (config->cpu.vendor)
6858 {
6859 case CV_INTEL:
6860 string_xml_appendf (*buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6861 "model=\"%u\" stepping=\"%u\"/>\n",
6862 config->cpu.family, config->cpu.model,
6863 config->cpu.stepping);
6864 break;
6865
6866 default:
6867 break;
6868 }
6869
6870 *buffer += "</pt-config>\n";
6871 }
6872
6873 /* Encode a raw buffer. */
6874
6875 static void
6876 linux_low_encode_raw (std::string *buffer, const gdb_byte *data,
6877 unsigned int size)
6878 {
6879 if (size == 0)
6880 return;
6881
6882 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6883 *buffer += "<raw>\n";
6884
6885 while (size-- > 0)
6886 {
6887 char elem[2];
6888
6889 elem[0] = tohex ((*data >> 4) & 0xf);
6890 elem[1] = tohex (*data++ & 0xf);
6891
6892 buffer->append (elem, 2);
6893 }
6894
6895 *buffer += "</raw>\n";
6896 }
6897
6898 /* See to_read_btrace target method. */
6899
6900 int
6901 linux_process_target::read_btrace (btrace_target_info *tinfo,
6902 std::string *buffer,
6903 enum btrace_read_type type)
6904 {
6905 struct btrace_data btrace;
6906 enum btrace_error err;
6907
6908 err = linux_read_btrace (&btrace, tinfo, type);
6909 if (err != BTRACE_ERR_NONE)
6910 {
6911 if (err == BTRACE_ERR_OVERFLOW)
6912 *buffer += "E.Overflow.";
6913 else
6914 *buffer += "E.Generic Error.";
6915
6916 return -1;
6917 }
6918
6919 switch (btrace.format)
6920 {
6921 case BTRACE_FORMAT_NONE:
6922 *buffer += "E.No Trace.";
6923 return -1;
6924
6925 case BTRACE_FORMAT_BTS:
6926 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6927 *buffer += "<btrace version=\"1.0\">\n";
6928
6929 for (const btrace_block &block : *btrace.variant.bts.blocks)
6930 string_xml_appendf (*buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6931 paddress (block.begin), paddress (block.end));
6932
6933 *buffer += "</btrace>\n";
6934 break;
6935
6936 case BTRACE_FORMAT_PT:
6937 *buffer += "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n";
6938 *buffer += "<btrace version=\"1.0\">\n";
6939 *buffer += "<pt>\n";
6940
6941 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6942
6943 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6944 btrace.variant.pt.size);
6945
6946 *buffer += "</pt>\n";
6947 *buffer += "</btrace>\n";
6948 break;
6949
6950 default:
6951 *buffer += "E.Unsupported Trace Format.";
6952 return -1;
6953 }
6954
6955 return 0;
6956 }
6957
6958 /* See to_btrace_conf target method. */
6959
6960 int
6961 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6962 std::string *buffer)
6963 {
6964 const struct btrace_config *conf;
6965
6966 *buffer += "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n";
6967 *buffer += "<btrace-conf version=\"1.0\">\n";
6968
6969 conf = linux_btrace_conf (tinfo);
6970 if (conf != NULL)
6971 {
6972 switch (conf->format)
6973 {
6974 case BTRACE_FORMAT_NONE:
6975 break;
6976
6977 case BTRACE_FORMAT_BTS:
6978 string_xml_appendf (*buffer, "<bts");
6979 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->bts.size);
6980 string_xml_appendf (*buffer, " />\n");
6981 break;
6982
6983 case BTRACE_FORMAT_PT:
6984 string_xml_appendf (*buffer, "<pt");
6985 string_xml_appendf (*buffer, " size=\"0x%x\"", conf->pt.size);
6986 string_xml_appendf (*buffer, "/>\n");
6987 break;
6988 }
6989 }
6990
6991 *buffer += "</btrace-conf>\n";
6992 return 0;
6993 }
6994 #endif /* HAVE_LINUX_BTRACE */
6995
6996 /* See nat/linux-nat.h. */
6997
6998 ptid_t
6999 current_lwp_ptid (void)
7000 {
7001 return ptid_of (current_thread);
7002 }
7003
7004 /* A helper function that copies NAME to DEST, replacing non-printable
7005 characters with '?'. Returns DEST as a convenience. */
7006
7007 static const char *
7008 replace_non_ascii (char *dest, const char *name)
7009 {
7010 while (*name != '\0')
7011 {
7012 if (!ISPRINT (*name))
7013 *dest++ = '?';
7014 else
7015 *dest++ = *name;
7016 ++name;
7017 }
7018 return dest;
7019 }
7020
7021 const char *
7022 linux_process_target::thread_name (ptid_t thread)
7023 {
7024 static char dest[100];
7025
7026 const char *name = linux_proc_tid_get_name (thread);
7027 if (name == nullptr)
7028 return nullptr;
7029
7030 /* Linux limits the comm file to 16 bytes (including the trailing
7031 \0. If the program or thread name is set when using a multi-byte
7032 encoding, this might cause it to be truncated mid-character. In
7033 this situation, sending the truncated form in an XML <thread>
7034 response will cause a parse error in gdb. So, instead convert
7035 from the locale's encoding (we can't be sure this is the correct
7036 encoding, but it's as good a guess as we have) to UTF-8, but in a
7037 way that ignores any encoding errors. See PR remote/30618. */
7038 const char *cset = nl_langinfo (CODESET);
7039 iconv_t handle = iconv_open ("UTF-8//IGNORE", cset);
7040 if (handle == (iconv_t) -1)
7041 return replace_non_ascii (dest, name);
7042
7043 size_t inbytes = strlen (name);
7044 char *inbuf = const_cast<char *> (name);
7045 size_t outbytes = sizeof (dest);
7046 char *outbuf = dest;
7047 size_t result = iconv (handle, &inbuf, &inbytes, &outbuf, &outbytes);
7048
7049 if (result == (size_t) -1)
7050 {
7051 if (errno == E2BIG)
7052 outbuf = &dest[sizeof (dest) - 1];
7053 else if ((errno == EILSEQ || errno == EINVAL)
7054 && outbuf < &dest[sizeof (dest) - 2])
7055 *outbuf++ = '?';
7056 *outbuf = '\0';
7057 }
7058
7059 iconv_close (handle);
7060 return *dest == '\0' ? nullptr : dest;
7061 }
7062
7063 #if USE_THREAD_DB
7064 bool
7065 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7066 int *handle_len)
7067 {
7068 return thread_db_thread_handle (ptid, handle, handle_len);
7069 }
7070 #endif
7071
7072 thread_info *
7073 linux_process_target::thread_pending_parent (thread_info *thread)
7074 {
7075 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7076
7077 if (parent == nullptr)
7078 return nullptr;
7079
7080 return get_lwp_thread (parent);
7081 }
7082
7083 thread_info *
7084 linux_process_target::thread_pending_child (thread_info *thread,
7085 target_waitkind *kind)
7086 {
7087 lwp_info *child = get_thread_lwp (thread)->pending_child (kind);
7088
7089 if (child == nullptr)
7090 return nullptr;
7091
7092 return get_lwp_thread (child);
7093 }
7094
7095 /* Default implementation of linux_target_ops method "set_pc" for
7096 32-bit pc register which is literally named "pc". */
7097
7098 void
7099 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7100 {
7101 uint32_t newpc = pc;
7102
7103 supply_register_by_name (regcache, "pc", &newpc);
7104 }
7105
7106 /* Default implementation of linux_target_ops method "get_pc" for
7107 32-bit pc register which is literally named "pc". */
7108
7109 CORE_ADDR
7110 linux_get_pc_32bit (struct regcache *regcache)
7111 {
7112 uint32_t pc;
7113
7114 collect_register_by_name (regcache, "pc", &pc);
7115 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
7116 return pc;
7117 }
7118
7119 /* Default implementation of linux_target_ops method "set_pc" for
7120 64-bit pc register which is literally named "pc". */
7121
7122 void
7123 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7124 {
7125 uint64_t newpc = pc;
7126
7127 supply_register_by_name (regcache, "pc", &newpc);
7128 }
7129
7130 /* Default implementation of linux_target_ops method "get_pc" for
7131 64-bit pc register which is literally named "pc". */
7132
7133 CORE_ADDR
7134 linux_get_pc_64bit (struct regcache *regcache)
7135 {
7136 uint64_t pc;
7137
7138 collect_register_by_name (regcache, "pc", &pc);
7139 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
7140 return pc;
7141 }
7142
7143 /* See linux-low.h. */
7144
7145 int
7146 linux_get_auxv (int pid, int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7147 {
7148 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7149 int offset = 0;
7150
7151 gdb_assert (wordsize == 4 || wordsize == 8);
7152
7153 while (the_target->read_auxv (pid, offset, data, 2 * wordsize)
7154 == 2 * wordsize)
7155 {
7156 if (wordsize == 4)
7157 {
7158 uint32_t *data_p = (uint32_t *) data;
7159 if (data_p[0] == match)
7160 {
7161 *valp = data_p[1];
7162 return 1;
7163 }
7164 }
7165 else
7166 {
7167 uint64_t *data_p = (uint64_t *) data;
7168 if (data_p[0] == match)
7169 {
7170 *valp = data_p[1];
7171 return 1;
7172 }
7173 }
7174
7175 offset += 2 * wordsize;
7176 }
7177
7178 return 0;
7179 }
7180
7181 /* See linux-low.h. */
7182
7183 CORE_ADDR
7184 linux_get_hwcap (int pid, int wordsize)
7185 {
7186 CORE_ADDR hwcap = 0;
7187 linux_get_auxv (pid, wordsize, AT_HWCAP, &hwcap);
7188 return hwcap;
7189 }
7190
7191 /* See linux-low.h. */
7192
7193 CORE_ADDR
7194 linux_get_hwcap2 (int pid, int wordsize)
7195 {
7196 CORE_ADDR hwcap2 = 0;
7197 linux_get_auxv (pid, wordsize, AT_HWCAP2, &hwcap2);
7198 return hwcap2;
7199 }
7200
7201 #ifdef HAVE_LINUX_REGSETS
7202 void
7203 initialize_regsets_info (struct regsets_info *info)
7204 {
7205 for (info->num_regsets = 0;
7206 info->regsets[info->num_regsets].size >= 0;
7207 info->num_regsets++)
7208 ;
7209 }
7210 #endif
7211
7212 void
7213 initialize_low (void)
7214 {
7215 struct sigaction sigchld_action;
7216
7217 memset (&sigchld_action, 0, sizeof (sigchld_action));
7218 set_target_ops (the_linux_target);
7219
7220 linux_ptrace_init_warnings ();
7221 linux_proc_init_warnings ();
7222
7223 sigchld_action.sa_handler = sigchld_handler;
7224 sigemptyset (&sigchld_action.sa_mask);
7225 sigchld_action.sa_flags = SA_RESTART;
7226 sigaction (SIGCHLD, &sigchld_action, NULL);
7227
7228 initialize_low_arch ();
7229
7230 linux_check_ptrace_features ();
7231 }