binutils-gdb.git: gdb/target.c (commit bf82649ed983fdcb100af24eae280d215a37a4b0)
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2023 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdbsupport/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "ui.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
61
62 static int default_verify_memory (struct target_ops *self,
63 const gdb_byte *data,
64 CORE_ADDR memaddr, ULONGEST size);
65
66 static void tcomplain (void) ATTRIBUTE_NORETURN;
67
68 /* Mapping between target_info objects (which have address identity)
69 and corresponding open/factory function/callback. Each add_target
70 call adds one entry to this map, and registers a "target
71 TARGET_NAME" command that when invoked calls the factory registered
72 here. The target_info object is associated with the command via
73 the command's context. */
74 static std::unordered_map<const target_info *, target_open_ftype *>
75 target_factories;
76
77 /* The singleton debug target. */
78
79 static struct target_ops *the_debug_target;
80
81 /* Command list for target. */
82
83 static struct cmd_list_element *targetlist = NULL;
84
85 /* See target.h. */
86
87 bool trust_readonly = false;
88
89 /* Nonzero if we should show true memory content including
90    memory breakpoints inserted by GDB.  */
91
92 static int show_memory_breakpoints = 0;
93
94 /* These globals control whether GDB attempts to perform these
95 operations; they are useful for targets that need to prevent
96 inadvertent disruption, such as in non-stop mode. */
97
98 bool may_write_registers = true;
99
100 bool may_write_memory = true;
101
102 bool may_insert_breakpoints = true;
103
104 bool may_insert_tracepoints = true;
105
106 bool may_insert_fast_tracepoints = true;
107
108 bool may_stop = true;
109
110 /* Non-zero if we want to see trace of target level stuff. */
111
112 static unsigned int targetdebug = 0;
113
114 static void
115 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
116 {
117 if (targetdebug)
118 current_inferior ()->push_target (the_debug_target);
119 else
120 current_inferior ()->unpush_target (the_debug_target);
121 }
122
123 static void
124 show_targetdebug (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 gdb_printf (file, _("Target debugging is %s.\n"), value);
128 }
129
130 int
131 target_has_memory ()
132 {
133 for (target_ops *t = current_inferior ()->top_target ();
134 t != NULL;
135 t = t->beneath ())
136 if (t->has_memory ())
137 return 1;
138
139 return 0;
140 }
141
142 int
143 target_has_stack ()
144 {
145 for (target_ops *t = current_inferior ()->top_target ();
146 t != NULL;
147 t = t->beneath ())
148 if (t->has_stack ())
149 return 1;
150
151 return 0;
152 }
153
154 int
155 target_has_registers ()
156 {
157 for (target_ops *t = current_inferior ()->top_target ();
158 t != NULL;
159 t = t->beneath ())
160 if (t->has_registers ())
161 return 1;
162
163 return 0;
164 }
165
166 bool
167 target_has_execution (inferior *inf)
168 {
169 if (inf == nullptr)
170 inf = current_inferior ();
171
172 for (target_ops *t = inf->top_target ();
173 t != nullptr;
174 t = inf->find_target_beneath (t))
175 if (t->has_execution (inf))
176 return true;
177
178 return false;
179 }
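/* Most of the target_* convenience functions that follow are thin
   wrappers: they simply delegate to the corresponding method of the
   top target on the current inferior's target stack, i.e. to
   current_inferior ()->top_target ()->METHOD (...).  */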
180
181 const char *
182 target_shortname ()
183 {
184 return current_inferior ()->top_target ()->shortname ();
185 }
186
187 /* See target.h. */
188
189 bool
190 target_attach_no_wait ()
191 {
192 return current_inferior ()->top_target ()->attach_no_wait ();
193 }
194
195 /* See target.h. */
196
197 void
198 target_post_attach (int pid)
199 {
200 return current_inferior ()->top_target ()->post_attach (pid);
201 }
202
203 /* See target.h. */
204
205 void
206 target_prepare_to_store (regcache *regcache)
207 {
208 return current_inferior ()->top_target ()->prepare_to_store (regcache);
209 }
210
211 /* See target.h. */
212
213 bool
214 target_supports_enable_disable_tracepoint ()
215 {
216 target_ops *target = current_inferior ()->top_target ();
217
218 return target->supports_enable_disable_tracepoint ();
219 }
220
221 bool
222 target_supports_string_tracing ()
223 {
224 return current_inferior ()->top_target ()->supports_string_tracing ();
225 }
226
227 /* See target.h. */
228
229 bool
230 target_supports_evaluation_of_breakpoint_conditions ()
231 {
232 target_ops *target = current_inferior ()->top_target ();
233
234 return target->supports_evaluation_of_breakpoint_conditions ();
235 }
236
237 /* See target.h. */
238
239 bool
240 target_supports_dumpcore ()
241 {
242 return current_inferior ()->top_target ()->supports_dumpcore ();
243 }
244
245 /* See target.h. */
246
247 void
248 target_dumpcore (const char *filename)
249 {
250 return current_inferior ()->top_target ()->dumpcore (filename);
251 }
252
253 /* See target.h. */
254
255 bool
256 target_can_run_breakpoint_commands ()
257 {
258 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
259 }
260
261 /* See target.h. */
262
263 void
264 target_files_info ()
265 {
266 return current_inferior ()->top_target ()->files_info ();
267 }
268
269 /* See target.h. */
270
271 int
272 target_insert_fork_catchpoint (int pid)
273 {
274 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
275 }
276
277 /* See target.h. */
278
279 int
280 target_remove_fork_catchpoint (int pid)
281 {
282 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
283 }
284
285 /* See target.h. */
286
287 int
288 target_insert_vfork_catchpoint (int pid)
289 {
290 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
291 }
292
293 /* See target.h. */
294
295 int
296 target_remove_vfork_catchpoint (int pid)
297 {
298 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
299 }
300
301 /* See target.h. */
302
303 int
304 target_insert_exec_catchpoint (int pid)
305 {
306 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
307 }
308
309 /* See target.h. */
310
311 int
312 target_remove_exec_catchpoint (int pid)
313 {
314 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
315 }
316
317 /* See target.h. */
318
319 int
320 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
321 gdb::array_view<const int> syscall_counts)
322 {
323 target_ops *target = current_inferior ()->top_target ();
324
325 return target->set_syscall_catchpoint (pid, needed, any_count,
326 syscall_counts);
327 }
328
329 /* See target.h. */
330
331 void
332 target_rcmd (const char *command, struct ui_file *outbuf)
333 {
334 return current_inferior ()->top_target ()->rcmd (command, outbuf);
335 }
336
337 /* See target.h. */
338
339 bool
340 target_can_lock_scheduler ()
341 {
342 target_ops *target = current_inferior ()->top_target ();
343
344   return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
345 }
346
347 /* See target.h. */
348
349 bool
350 target_can_async_p ()
351 {
352 return target_can_async_p (current_inferior ()->top_target ());
353 }
354
355 /* See target.h. */
356
357 bool
358 target_can_async_p (struct target_ops *target)
359 {
360 if (!target_async_permitted)
361 return false;
362 return target->can_async_p ();
363 }
364
365 /* See target.h. */
366
367 bool
368 target_is_async_p ()
369 {
370 bool result = current_inferior ()->top_target ()->is_async_p ();
371 gdb_assert (target_async_permitted || !result);
372 return result;
373 }
374
375 exec_direction_kind
376 target_execution_direction ()
377 {
378 return current_inferior ()->top_target ()->execution_direction ();
379 }
380
381 /* See target.h. */
382
383 const char *
384 target_extra_thread_info (thread_info *tp)
385 {
386 return current_inferior ()->top_target ()->extra_thread_info (tp);
387 }
388
389 /* See target.h. */
390
391 const char *
392 target_pid_to_exec_file (int pid)
393 {
394 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
395 }
396
397 /* See target.h. */
398
399 gdbarch *
400 target_thread_architecture (ptid_t ptid)
401 {
402 return current_inferior ()->top_target ()->thread_architecture (ptid);
403 }
404
405 /* See target.h. */
406
407 int
408 target_find_memory_regions (find_memory_region_ftype func, void *data)
409 {
410 return current_inferior ()->top_target ()->find_memory_regions (func, data);
411 }
412
413 /* See target.h. */
414
415 gdb::unique_xmalloc_ptr<char>
416 target_make_corefile_notes (bfd *bfd, int *size_p)
417 {
418 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
419 }
420
421 gdb_byte *
422 target_get_bookmark (const char *args, int from_tty)
423 {
424 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
425 }
426
427 void
428 target_goto_bookmark (const gdb_byte *arg, int from_tty)
429 {
430 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
431 }
432
433 /* See target.h. */
434
435 bool
436 target_stopped_by_watchpoint ()
437 {
438 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
439 }
440
441 /* See target.h. */
442
443 bool
444 target_stopped_by_sw_breakpoint ()
445 {
446 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
447 }
448
449 bool
450 target_supports_stopped_by_sw_breakpoint ()
451 {
452 target_ops *target = current_inferior ()->top_target ();
453
454 return target->supports_stopped_by_sw_breakpoint ();
455 }
456
457 bool
458 target_stopped_by_hw_breakpoint ()
459 {
460 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
461 }
462
463 bool
464 target_supports_stopped_by_hw_breakpoint ()
465 {
466 target_ops *target = current_inferior ()->top_target ();
467
468 return target->supports_stopped_by_hw_breakpoint ();
469 }
470
471 /* See target.h. */
472
473 bool
474 target_have_steppable_watchpoint ()
475 {
476 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
477 }
478
479 /* See target.h. */
480
481 int
482 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
483 {
484 target_ops *target = current_inferior ()->top_target ();
485
486 return target->can_use_hw_breakpoint (type, cnt, othertype);
487 }
488
489 /* See target.h. */
490
491 int
492 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
493 {
494 target_ops *target = current_inferior ()->top_target ();
495
496 return target->region_ok_for_hw_watchpoint (addr, len);
497 }
498
499
500 int
501 target_can_do_single_step ()
502 {
503 return current_inferior ()->top_target ()->can_do_single_step ();
504 }
505
506 /* See target.h. */
507
508 int
509 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
510 expression *cond)
511 {
512 target_ops *target = current_inferior ()->top_target ();
513
514 return target->insert_watchpoint (addr, len, type, cond);
515 }
516
517 /* See target.h. */
518
519 int
520 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
521 expression *cond)
522 {
523 target_ops *target = current_inferior ()->top_target ();
524
525 return target->remove_watchpoint (addr, len, type, cond);
526 }
527
528 /* See target.h. */
529
530 int
531 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
532 {
533 target_ops *target = current_inferior ()->top_target ();
534
535 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
536 }
537
538 /* See target.h. */
539
540 int
541 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
542 {
543 target_ops *target = current_inferior ()->top_target ();
544
545 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
546 }
547
548 /* See target.h. */
549
550 bool
551 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
552 expression *cond)
553 {
554 target_ops *target = current_inferior ()->top_target ();
555
556 return target->can_accel_watchpoint_condition (addr, len, type, cond);
557 }
558
559 /* See target.h. */
560
561 bool
562 target_can_execute_reverse ()
563 {
564 return current_inferior ()->top_target ()->can_execute_reverse ();
565 }
566
567 ptid_t
568 target_get_ada_task_ptid (long lwp, ULONGEST tid)
569 {
570 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
571 }
572
573 bool
574 target_filesystem_is_local ()
575 {
576 return current_inferior ()->top_target ()->filesystem_is_local ();
577 }
578
579 void
580 target_trace_init ()
581 {
582 return current_inferior ()->top_target ()->trace_init ();
583 }
584
585 void
586 target_download_tracepoint (bp_location *location)
587 {
588 return current_inferior ()->top_target ()->download_tracepoint (location);
589 }
590
591 bool
592 target_can_download_tracepoint ()
593 {
594 return current_inferior ()->top_target ()->can_download_tracepoint ();
595 }
596
597 void
598 target_download_trace_state_variable (const trace_state_variable &tsv)
599 {
600 target_ops *target = current_inferior ()->top_target ();
601
602 return target->download_trace_state_variable (tsv);
603 }
604
605 void
606 target_enable_tracepoint (bp_location *loc)
607 {
608 return current_inferior ()->top_target ()->enable_tracepoint (loc);
609 }
610
611 void
612 target_disable_tracepoint (bp_location *loc)
613 {
614 return current_inferior ()->top_target ()->disable_tracepoint (loc);
615 }
616
617 void
618 target_trace_start ()
619 {
620 return current_inferior ()->top_target ()->trace_start ();
621 }
622
623 void
624 target_trace_set_readonly_regions ()
625 {
626 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
627 }
628
629 int
630 target_get_trace_status (trace_status *ts)
631 {
632 return current_inferior ()->top_target ()->get_trace_status (ts);
633 }
634
635 void
636 target_get_tracepoint_status (tracepoint *tp, uploaded_tp *utp)
637 {
638 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
639 }
640
641 void
642 target_trace_stop ()
643 {
644 return current_inferior ()->top_target ()->trace_stop ();
645 }
646
647 int
648 target_trace_find (trace_find_type type, int num,
649 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
650 {
651 target_ops *target = current_inferior ()->top_target ();
652
653 return target->trace_find (type, num, addr1, addr2, tpp);
654 }
655
656 bool
657 target_get_trace_state_variable_value (int tsv, LONGEST *val)
658 {
659 target_ops *target = current_inferior ()->top_target ();
660
661 return target->get_trace_state_variable_value (tsv, val);
662 }
663
664 int
665 target_save_trace_data (const char *filename)
666 {
667 return current_inferior ()->top_target ()->save_trace_data (filename);
668 }
669
670 int
671 target_upload_tracepoints (uploaded_tp **utpp)
672 {
673 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
674 }
675
676 int
677 target_upload_trace_state_variables (uploaded_tsv **utsvp)
678 {
679 target_ops *target = current_inferior ()->top_target ();
680
681 return target->upload_trace_state_variables (utsvp);
682 }
683
684 LONGEST
685 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
686 {
687 target_ops *target = current_inferior ()->top_target ();
688
689 return target->get_raw_trace_data (buf, offset, len);
690 }
691
692 int
693 target_get_min_fast_tracepoint_insn_len ()
694 {
695 target_ops *target = current_inferior ()->top_target ();
696
697 return target->get_min_fast_tracepoint_insn_len ();
698 }
699
700 void
701 target_set_disconnected_tracing (int val)
702 {
703 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
704 }
705
706 void
707 target_set_circular_trace_buffer (int val)
708 {
709 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
710 }
711
712 void
713 target_set_trace_buffer_size (LONGEST val)
714 {
715 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
716 }
717
718 bool
719 target_set_trace_notes (const char *user, const char *notes,
720 const char *stopnotes)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->set_trace_notes (user, notes, stopnotes);
725 }
726
727 bool
728 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
729 {
730 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
731 }
732
733 void
734 target_set_permissions ()
735 {
736 return current_inferior ()->top_target ()->set_permissions ();
737 }
738
739 bool
740 target_static_tracepoint_marker_at (CORE_ADDR addr,
741 static_tracepoint_marker *marker)
742 {
743 target_ops *target = current_inferior ()->top_target ();
744
745 return target->static_tracepoint_marker_at (addr, marker);
746 }
747
748 std::vector<static_tracepoint_marker>
749 target_static_tracepoint_markers_by_strid (const char *marker_id)
750 {
751 target_ops *target = current_inferior ()->top_target ();
752
753 return target->static_tracepoint_markers_by_strid (marker_id);
754 }
755
756 traceframe_info_up
757 target_traceframe_info ()
758 {
759 return current_inferior ()->top_target ()->traceframe_info ();
760 }
761
762 bool
763 target_use_agent (bool use)
764 {
765 return current_inferior ()->top_target ()->use_agent (use);
766 }
767
768 bool
769 target_can_use_agent ()
770 {
771 return current_inferior ()->top_target ()->can_use_agent ();
772 }
773
774 bool
775 target_augmented_libraries_svr4_read ()
776 {
777 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
778 }
779
780 bool
781 target_supports_memory_tagging ()
782 {
783 return current_inferior ()->top_target ()->supports_memory_tagging ();
784 }
785
786 bool
787 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
788 int type)
789 {
790 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
791 }
792
793 bool
794 target_store_memtags (CORE_ADDR address, size_t len,
795 const gdb::byte_vector &tags, int type)
796 {
797 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
798 }
799
800 x86_xsave_layout
801 target_fetch_x86_xsave_layout ()
802 {
803 return current_inferior ()->top_target ()->fetch_x86_xsave_layout ();
804 }
805
806 void
807 target_log_command (const char *p)
808 {
809 return current_inferior ()->top_target ()->log_command (p);
810 }
811
812 /* This is used to implement the various target commands. */
813
814 static void
815 open_target (const char *args, int from_tty, struct cmd_list_element *command)
816 {
817 auto *ti = static_cast<target_info *> (command->context ());
818 target_open_ftype *func = target_factories[ti];
819
820 if (targetdebug)
821 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
822 ti->shortname);
823
824 func (args, from_tty);
825
826 if (targetdebug)
827 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
828 ti->shortname, args, from_tty);
829 }
830
831 /* See target.h. */
832
833 void
834 add_target (const target_info &t, target_open_ftype *func,
835 completer_ftype *completer)
836 {
837 struct cmd_list_element *c;
838
839 auto &func_slot = target_factories[&t];
840 if (func_slot != nullptr)
841 internal_error (_("target already added (\"%s\")."), t.shortname);
842 func_slot = func;
843
844 if (targetlist == NULL)
845 add_basic_prefix_cmd ("target", class_run, _("\
846 Connect to a target machine or process.\n\
847 The first argument is the type or protocol of the target machine.\n\
848 Remaining arguments are interpreted by the target protocol. For more\n\
849 information on the arguments for a particular protocol, type\n\
850 `help target ' followed by the protocol name."),
851 &targetlist, 0, &cmdlist);
852 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
853 c->set_context ((void *) &t);
854 c->func = open_target;
855 if (completer != NULL)
856 set_cmd_completer (c, completer);
857 }
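/* Illustrative sketch (not part of this file): a target backend would
   typically register itself from its _initialize_* function roughly
   like this.  The names "sketch_target_info" and "sketch_target_open"
   are hypothetical.

     static const target_info sketch_target_info = {
       "sketch",					   /* shortname */
       N_("Sketch target"),				   /* longname */
       N_("Use a sketch target.\nUsage: target sketch ARGS")  /* doc */
     };

     static void
     sketch_target_open (const char *args, int from_tty)
     {
       ... create and push a target_ops instance on the current
	   inferior's target stack ...
     }

     void
     _initialize_sketch_target ()
     {
       add_target (sketch_target_info, sketch_target_open);
     }

   The "target sketch" command created by add_target then calls
   sketch_target_open via open_target above.  */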
858
859 /* See target.h. */
860
861 void
862 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
863 {
864 struct cmd_list_element *c;
865
866   /* If we used add_alias_cmd here, we would not get the deprecated
867      warning; see PR cli/15104.  */
868 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
869 c->func = open_target;
870 c->set_context ((void *) &tinfo);
871 gdb::unique_xmalloc_ptr<char> alt
872 = xstrprintf ("target %s", tinfo.shortname);
873 deprecate_cmd (c, alt.release ());
874 }
875
876 /* Stub functions */
877
878 void
879 target_kill (void)
880 {
881
882   /* If the commit_resume_state of the to-be-killed inferior's process stratum
883      is true, and this inferior is the last live inferior with resumed threads
884      of that target, then we want to leave commit_resume_state set to false, as
885      the target won't have any resumed threads anymore.  We achieve this with
886      this scoped_disable_commit_resumed.  On construction, it sets the flag
887      to false.  On destruction, it only sets it back to true if there are
888      resumed threads left.  */
889 scoped_disable_commit_resumed disable ("killing");
890 current_inferior ()->top_target ()->kill ();
891 }
892
893 void
894 target_load (const char *arg, int from_tty)
895 {
896 target_dcache_invalidate ();
897 current_inferior ()->top_target ()->load (arg, from_tty);
898 }
899
900 /* Define it. */
901
902 target_terminal_state target_terminal::m_terminal_state
903 = target_terminal_state::is_ours;
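/* Descriptive note: target_terminal_state distinguishes three states:
   is_inferior (the inferior owns the terminal while it runs in the
   foreground), is_ours (GDB owns the terminal, e.g. while showing the
   prompt), and is_ours_for_output (GDB owns the terminal for output
   only, e.g. while printing about a stop).  The routines below switch
   between these states, keeping the static m_terminal_state above in
   sync with each inferior's terminal_state field.  */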
904
905 /* See target/target.h. */
906
907 void
908 target_terminal::init (void)
909 {
910 current_inferior ()->top_target ()->terminal_init ();
911
912 m_terminal_state = target_terminal_state::is_ours;
913 }
914
915 /* See target/target.h. */
916
917 void
918 target_terminal::inferior (void)
919 {
920 struct ui *ui = current_ui;
921
922 /* A background resume (``run&'') should leave GDB in control of the
923 terminal. */
924 if (ui->prompt_state != PROMPT_BLOCKED)
925 return;
926
927 /* Since we always run the inferior in the main console (unless "set
928 inferior-tty" is in effect), when some UI other than the main one
929 calls target_terminal::inferior, then we leave the main UI's
930 terminal settings as is. */
931 if (ui != main_ui)
932 return;
933
934 /* If GDB is resuming the inferior in the foreground, install
935 inferior's terminal modes. */
936
937 struct inferior *inf = current_inferior ();
938
939 if (inf->terminal_state != target_terminal_state::is_inferior)
940 {
941 current_inferior ()->top_target ()->terminal_inferior ();
942 inf->terminal_state = target_terminal_state::is_inferior;
943 }
944
945 m_terminal_state = target_terminal_state::is_inferior;
946
947 /* If the user hit C-c before, pretend that it was hit right
948 here. */
949 if (check_quit_flag ())
950 target_pass_ctrlc ();
951 }
952
953 /* See target/target.h. */
954
955 void
956 target_terminal::restore_inferior (void)
957 {
958 struct ui *ui = current_ui;
959
960 /* See target_terminal::inferior(). */
961 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
962 return;
963
964   /* Restore the terminal settings of inferiors that were in the
965      foreground but are now ours_for_output due to a temporary
966      target_terminal::ours_for_output () call.  */
967
968 {
969 scoped_restore_current_inferior restore_inferior;
970
971 for (::inferior *inf : all_inferiors ())
972 {
973 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
974 {
975 set_current_inferior (inf);
976 current_inferior ()->top_target ()->terminal_inferior ();
977 inf->terminal_state = target_terminal_state::is_inferior;
978 }
979 }
980 }
981
982 m_terminal_state = target_terminal_state::is_inferior;
983
984 /* If the user hit C-c before, pretend that it was hit right
985 here. */
986 if (check_quit_flag ())
987 target_pass_ctrlc ();
988 }
989
990 /* Switch terminal state to DESIRED_STATE, either is_ours, or
991 is_ours_for_output. */
992
993 static void
994 target_terminal_is_ours_kind (target_terminal_state desired_state)
995 {
996 scoped_restore_current_inferior restore_inferior;
997
998 /* Must do this in two passes. First, have all inferiors save the
999      current terminal settings.  Then, after all inferiors have had a
1000     chance to safely save the terminal settings, restore GDB's
1001 terminal settings. */
1002
1003 for (inferior *inf : all_inferiors ())
1004 {
1005 if (inf->terminal_state == target_terminal_state::is_inferior)
1006 {
1007 set_current_inferior (inf);
1008 current_inferior ()->top_target ()->terminal_save_inferior ();
1009 }
1010 }
1011
1012 for (inferior *inf : all_inferiors ())
1013 {
1014 /* Note we don't check is_inferior here like above because we
1015 need to handle 'is_ours_for_output -> is_ours' too. Careful
1016 to never transition from 'is_ours' to 'is_ours_for_output',
1017 though. */
1018 if (inf->terminal_state != target_terminal_state::is_ours
1019 && inf->terminal_state != desired_state)
1020 {
1021 set_current_inferior (inf);
1022 if (desired_state == target_terminal_state::is_ours)
1023 current_inferior ()->top_target ()->terminal_ours ();
1024 else if (desired_state == target_terminal_state::is_ours_for_output)
1025 current_inferior ()->top_target ()->terminal_ours_for_output ();
1026 else
1027 gdb_assert_not_reached ("unhandled desired state");
1028 inf->terminal_state = desired_state;
1029 }
1030 }
1031 }
1032
1033 /* See target/target.h. */
1034
1035 void
1036 target_terminal::ours ()
1037 {
1038 struct ui *ui = current_ui;
1039
1040 /* See target_terminal::inferior. */
1041 if (ui != main_ui)
1042 return;
1043
1044 if (m_terminal_state == target_terminal_state::is_ours)
1045 return;
1046
1047 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1048 m_terminal_state = target_terminal_state::is_ours;
1049 }
1050
1051 /* See target/target.h. */
1052
1053 void
1054 target_terminal::ours_for_output ()
1055 {
1056 struct ui *ui = current_ui;
1057
1058 /* See target_terminal::inferior. */
1059 if (ui != main_ui)
1060 return;
1061
1062 if (!target_terminal::is_inferior ())
1063 return;
1064
1065 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1066 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1067 }
1068
1069 /* See target/target.h. */
1070
1071 void
1072 target_terminal::info (const char *arg, int from_tty)
1073 {
1074 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1075 }
1076
1077 /* See target.h. */
1078
1079 bool
1080 target_supports_terminal_ours (void)
1081 {
1082 /* The current top target is the target at the top of the target
1083 stack of the current inferior. While normally there's always an
1084 inferior, we must check for nullptr here because we can get here
1085 very early during startup, before the initial inferior is first
1086 created. */
1087 inferior *inf = current_inferior ();
1088
1089 if (inf == nullptr)
1090 return false;
1091 return inf->top_target ()->supports_terminal_ours ();
1092 }
1093
1094 static void
1095 tcomplain (void)
1096 {
1097 error (_("You can't do that when your target is `%s'"),
1098 current_inferior ()->top_target ()->shortname ());
1099 }
1100
1101 void
1102 noprocess (void)
1103 {
1104 error (_("You can't do that without a process to debug."));
1105 }
1106
1107 static void
1108 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1109 {
1110 gdb_printf (_("No saved terminal information.\n"));
1111 }
1112
1113 /* A default implementation for the to_get_ada_task_ptid target method.
1114
1115 This function builds the PTID by using both LWP and TID as part of
1116 the PTID lwp and tid elements. The pid used is the pid of the
1117 inferior_ptid. */
1118
1119 static ptid_t
1120 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1121 {
1122 return ptid_t (inferior_ptid.pid (), lwp, tid);
1123 }
1124
1125 static enum exec_direction_kind
1126 default_execution_direction (struct target_ops *self)
1127 {
1128 if (!target_can_execute_reverse ())
1129 return EXEC_FORWARD;
1130 else if (!target_can_async_p ())
1131 return EXEC_FORWARD;
1132 else
1133 gdb_assert_not_reached ("\
1134 to_execution_direction must be implemented for reverse async");
1135 }
1136
1137 /* See target.h. */
1138
1139 void
1140 target_ops_ref_policy::decref (target_ops *t)
1141 {
1142 t->decref ();
1143 if (t->refcount () == 0)
1144 {
1145 if (t->stratum () == process_stratum)
1146 connection_list_remove (as_process_stratum_target (t));
1147
1148 for (inferior *inf : all_inferiors ())
1149 gdb_assert (!inf->target_is_pushed (t));
1150
1151 fileio_handles_invalidate_target (t);
1152
1153 t->close ();
1154
1155 if (targetdebug)
1156 gdb_printf (gdb_stdlog, "closing target\n");
1157 }
1158 }
1159
1160 /* See target.h. */
1161
1162 void
1163 target_stack::push (target_ops *t)
1164 {
1165 /* We must create a new reference first. It is possible that T is
1166 already pushed on this target stack, in which case we will first
1167 unpush it below, before re-pushing it. If we don't increment the
1168 reference count now, then when we unpush it, we might end up deleting
1169 T, which is not good. */
1170 auto ref = target_ops_ref::new_reference (t);
1171
1172 strata stratum = t->stratum ();
1173
1174 /* If there's already a target at this stratum, remove it. */
1175
1176 if (m_stack[stratum].get () != nullptr)
1177 unpush (m_stack[stratum].get ());
1178
1179 /* Now add the new one. */
1180 m_stack[stratum] = std::move (ref);
1181
1182 if (m_top < stratum)
1183 m_top = stratum;
1184
1185 if (stratum == process_stratum)
1186 connection_list_add (as_process_stratum_target (t));
1187 }
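/* Descriptive note: m_stack is indexed by stratum, so at most one
   target can be pushed per stratum at a time.  A typical stack layers
   e.g. an exec target at file_stratum beneath a native or remote
   target at process_stratum.  */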
1188
1189 /* See target.h. */
1190
1191 bool
1192 target_stack::unpush (target_ops *t)
1193 {
1194 gdb_assert (t != NULL);
1195
1196 strata stratum = t->stratum ();
1197
1198 if (stratum == dummy_stratum)
1199 internal_error (_("Attempt to unpush the dummy target"));
1200
1201 /* Look for the specified target. Note that a target can only occur
1202 once in the target stack. */
1203
1204 if (m_stack[stratum] != t)
1205 {
1206 /* If T wasn't pushed, quit. Only open targets should be
1207 closed. */
1208 return false;
1209 }
1210
1211 if (m_top == stratum)
1212 m_top = this->find_beneath (t)->stratum ();
1213
1214 /* Move the target reference off the target stack, this sets the pointer
1215 held in m_stack to nullptr, and places the reference in ref. When
1216 ref goes out of scope its reference count will be decremented, which
1217 might cause the target to close.
1218
1219 We have to do it this way, and not just set the value in m_stack to
1220 nullptr directly, because doing so would decrement the reference
1221 count first, which might close the target, and closing the target
1222 does a check that the target is not on any inferiors target_stack. */
1223 auto ref = std::move (m_stack[stratum]);
1224
1225 return true;
1226 }
1227
1228 void
1229 target_unpusher::operator() (struct target_ops *ops) const
1230 {
1231 current_inferior ()->unpush_target (ops);
1232 }
1233
1234 /* Default implementation of to_get_thread_local_address. */
1235
1236 static void
1237 generic_tls_error (void)
1238 {
1239 throw_error (TLS_GENERIC_ERROR,
1240 _("Cannot find thread-local variables on this target"));
1241 }
1242
1243 /* Using the objfile specified in OBJFILE, find the address for the
1244 current thread's thread-local storage with offset OFFSET. */
1245 CORE_ADDR
1246 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1247 {
1248 volatile CORE_ADDR addr = 0;
1249 struct target_ops *target = current_inferior ()->top_target ();
1250 gdbarch *gdbarch = current_inferior ()->arch ();
1251
1252 /* If OBJFILE is a separate debug object file, look for the
1253 original object file. */
1254 if (objfile->separate_debug_objfile_backlink != NULL)
1255 objfile = objfile->separate_debug_objfile_backlink;
1256
1257 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1258 {
1259 ptid_t ptid = inferior_ptid;
1260
1261 try
1262 {
1263 CORE_ADDR lm_addr;
1264
1265 /* Fetch the load module address for this objfile. */
1266 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1267 objfile);
1268
1269 if (gdbarch_get_thread_local_address_p (gdbarch))
1270 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1271 offset);
1272 else
1273 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1274 }
1275 /* If an error occurred, print TLS related messages here. Otherwise,
1276 throw the error to some higher catcher. */
1277 catch (const gdb_exception &ex)
1278 {
1279 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1280
1281 switch (ex.error)
1282 {
1283 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1284 error (_("Cannot find thread-local variables "
1285 "in this thread library."));
1286 break;
1287 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1288 if (objfile_is_library)
1289 error (_("Cannot find shared library `%s' in dynamic"
1290 " linker's load module list"), objfile_name (objfile));
1291 else
1292 error (_("Cannot find executable file `%s' in dynamic"
1293 " linker's load module list"), objfile_name (objfile));
1294 break;
1295 case TLS_NOT_ALLOCATED_YET_ERROR:
1296 if (objfile_is_library)
1297 error (_("The inferior has not yet allocated storage for"
1298 " thread-local variables in\n"
1299 "the shared library `%s'\n"
1300 "for %s"),
1301 objfile_name (objfile),
1302 target_pid_to_str (ptid).c_str ());
1303 else
1304 error (_("The inferior has not yet allocated storage for"
1305 " thread-local variables in\n"
1306 "the executable `%s'\n"
1307 "for %s"),
1308 objfile_name (objfile),
1309 target_pid_to_str (ptid).c_str ());
1310 break;
1311 case TLS_GENERIC_ERROR:
1312 if (objfile_is_library)
1313 error (_("Cannot find thread-local storage for %s, "
1314 "shared library %s:\n%s"),
1315 target_pid_to_str (ptid).c_str (),
1316 objfile_name (objfile), ex.what ());
1317 else
1318 error (_("Cannot find thread-local storage for %s, "
1319 "executable file %s:\n%s"),
1320 target_pid_to_str (ptid).c_str (),
1321 objfile_name (objfile), ex.what ());
1322 break;
1323 default:
1324 throw;
1325 break;
1326 }
1327 }
1328 }
1329 else
1330 error (_("Cannot find thread-local variables on this target"));
1331
1332 return addr;
1333 }
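/* Illustrative sketch (not part of this file): a caller holding the
   per-module offset of a TLS variable could read its value in the
   current thread roughly like this; OBJFILE, OFFSET and the 8-byte
   size are hypothetical.

     CORE_ADDR addr = target_translate_tls_address (objfile, offset);
     gdb_byte buf[8];
     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("Cannot read thread-local variable"));
*/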
1334
1335 const char *
1336 target_xfer_status_to_string (enum target_xfer_status status)
1337 {
1338 #define CASE(X) case X: return #X
1339 switch (status)
1340 {
1341 CASE(TARGET_XFER_E_IO);
1342 CASE(TARGET_XFER_UNAVAILABLE);
1343 default:
1344 return "<unknown>";
1345 }
1346 #undef CASE
1347 };
1348
1349
1350 const std::vector<target_section> *
1351 target_get_section_table (struct target_ops *target)
1352 {
1353 return target->get_section_table ();
1354 }
1355
1356 /* Find a section containing ADDR. */
1357
1358 const struct target_section *
1359 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1360 {
1361 const std::vector<target_section> *table = target_get_section_table (target);
1362
1363 if (table == NULL)
1364 return NULL;
1365
1366 for (const target_section &secp : *table)
1367 {
1368 if (addr >= secp.addr && addr < secp.endaddr)
1369 return &secp;
1370 }
1371 return NULL;
1372 }
1373
1374 /* See target.h. */
1375
1376 const std::vector<target_section> *
1377 default_get_section_table ()
1378 {
1379 return &current_program_space->target_sections ();
1380 }
1381
1382 /* Helper for the memory xfer routines. Checks the attributes of the
1383 memory region of MEMADDR against the read or write being attempted.
1384 If the access is permitted returns true, otherwise returns false.
1385 REGION_P is an optional output parameter. If not-NULL, it is
1386 filled with a pointer to the memory region of MEMADDR. REG_LEN
1387 returns LEN trimmed to the end of the region. This is how much the
1388 caller can continue requesting, if the access is permitted. A
1389 single xfer request must not straddle memory region boundaries. */
1390
1391 static int
1392 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1393 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1394 struct mem_region **region_p)
1395 {
1396 struct mem_region *region;
1397
1398 region = lookup_mem_region (memaddr);
1399
1400 if (region_p != NULL)
1401 *region_p = region;
1402
1403 switch (region->attrib.mode)
1404 {
1405 case MEM_RO:
1406 if (writebuf != NULL)
1407 return 0;
1408 break;
1409
1410 case MEM_WO:
1411 if (readbuf != NULL)
1412 return 0;
1413 break;
1414
1415 case MEM_FLASH:
1416 /* We only support writing to flash during "load" for now. */
1417 if (writebuf != NULL)
1418 error (_("Writing to flash memory forbidden in this context"));
1419 break;
1420
1421 case MEM_NONE:
1422 return 0;
1423 }
1424
1425 /* region->hi == 0 means there's no upper bound. */
1426 if (memaddr + len < region->hi || region->hi == 0)
1427 *reg_len = len;
1428 else
1429 *reg_len = region->hi - memaddr;
1430
1431 return 1;
1432 }
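/* For example (illustrative numbers): with a region covering
   [0x1000, 0x2000), a 0x200-byte read starting at 0x1f00 is permitted
   but *REG_LEN is trimmed to 0x100, so the caller transfers up to the
   region boundary and must issue a separate request for the rest.  A
   write into a MEM_RO region is refused (the function returns 0).  */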
1433
1434 /* Read memory from more than one valid target. A core file, for
1435    instance, could have some of the memory but delegate other bits to
1436 the target below it. So, we must manually try all targets. */
1437
1438 enum target_xfer_status
1439 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1440 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1441 ULONGEST *xfered_len)
1442 {
1443 enum target_xfer_status res;
1444
1445 do
1446 {
1447 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1448 readbuf, writebuf, memaddr, len,
1449 xfered_len);
1450 if (res == TARGET_XFER_OK)
1451 break;
1452
1453 /* Stop if the target reports that the memory is not available. */
1454 if (res == TARGET_XFER_UNAVAILABLE)
1455 break;
1456
1457 /* Don't continue past targets which have all the memory.
1458 At one time, this code was necessary to read data from
1459 executables / shared libraries when data for the requested
1460 addresses weren't available in the core file. But now the
1461 core target handles this case itself. */
1462 if (ops->has_all_memory ())
1463 break;
1464
1465 ops = ops->beneath ();
1466 }
1467 while (ops != NULL);
1468
1469 /* The cache works at the raw memory level. Make sure the cache
1470 gets updated with raw contents no matter what kind of memory
1471 object was originally being written. Note we do write-through
1472 first, so that if it fails, we don't write to the cache contents
1473 that never made it to the target. */
1474 if (writebuf != NULL
1475 && inferior_ptid != null_ptid
1476 && target_dcache_init_p ()
1477 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1478 {
1479 DCACHE *dcache = target_dcache_get ();
1480
1481 /* Note that writing to an area of memory which wasn't present
1482 in the cache doesn't cause it to be loaded in. */
1483 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1484 }
1485
1486 return res;
1487 }
1488
1489 /* Perform a partial memory transfer.
1490 For docs see target.h, to_xfer_partial. */
1491
1492 static enum target_xfer_status
1493 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1494 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1495 ULONGEST len, ULONGEST *xfered_len)
1496 {
1497 enum target_xfer_status res;
1498 ULONGEST reg_len;
1499 struct mem_region *region;
1500 struct inferior *inf;
1501
1502 /* For accesses to unmapped overlay sections, read directly from
1503 files. Must do this first, as MEMADDR may need adjustment. */
1504 if (readbuf != NULL && overlay_debugging)
1505 {
1506 struct obj_section *section = find_pc_overlay (memaddr);
1507
1508 if (pc_in_unmapped_range (memaddr, section))
1509 {
1510 const std::vector<target_section> *table = target_get_section_table (ops);
1511 const char *section_name = section->the_bfd_section->name;
1512
1513 memaddr = overlay_mapped_address (memaddr, section);
1514
1515 auto match_cb = [=] (const struct target_section *s)
1516 {
1517 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1518 };
1519
1520 return section_table_xfer_memory_partial (readbuf, writebuf,
1521 memaddr, len, xfered_len,
1522 *table, match_cb);
1523 }
1524 }
1525
1526 /* Try the executable files, if "trust-readonly-sections" is set. */
1527 if (readbuf != NULL && trust_readonly)
1528 {
1529 const struct target_section *secp
1530 = target_section_by_addr (ops, memaddr);
1531 if (secp != NULL
1532 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1533 {
1534 const std::vector<target_section> *table = target_get_section_table (ops);
1535 return section_table_xfer_memory_partial (readbuf, writebuf,
1536 memaddr, len, xfered_len,
1537 *table);
1538 }
1539 }
1540
1541 /* Try GDB's internal data cache. */
1542
1543 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1544 &region))
1545 return TARGET_XFER_E_IO;
1546
1547 if (inferior_ptid != null_ptid)
1548 inf = current_inferior ();
1549 else
1550 inf = NULL;
1551
1552 if (inf != NULL
1553 && readbuf != NULL
1554 /* The dcache reads whole cache lines; that doesn't play well
1555 with reading from a trace buffer, because reading outside of
1556 the collected memory range fails. */
1557 && get_traceframe_number () == -1
1558 && (region->attrib.cache
1559 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1560 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1561 {
1562 DCACHE *dcache = target_dcache_get_or_init ();
1563
1564 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1565 reg_len, xfered_len);
1566 }
1567
1568 /* If none of those methods found the memory we wanted, fall back
1569 to a target partial transfer. Normally a single call to
1570 to_xfer_partial is enough; if it doesn't recognize an object
1571 it will call the to_xfer_partial of the next target down.
1572 But for memory this won't do. Memory is the only target
1573 object which can be read from more than one valid target.
1574      A core file, for instance, could have some of the memory but
1575 delegate other bits to the target below it. So, we must
1576 manually try all targets. */
1577
1578 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1579 xfered_len);
1580
1581 /* If we still haven't got anything, return the last error. We
1582 give up. */
1583 return res;
1584 }
1585
1586 /* Perform a partial memory transfer. For docs see target.h,
1587 to_xfer_partial. */
1588
1589 static enum target_xfer_status
1590 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1591 gdb_byte *readbuf, const gdb_byte *writebuf,
1592 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1593 {
1594 enum target_xfer_status res;
1595
1596 /* Zero length requests are ok and require no work. */
1597 if (len == 0)
1598 return TARGET_XFER_EOF;
1599
1600 memaddr = gdbarch_remove_non_address_bits (current_inferior ()->arch (),
1601 memaddr);
1602
1603 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1604 breakpoint insns, thus hiding out from higher layers whether
1605 there are software breakpoints inserted in the code stream. */
1606 if (readbuf != NULL)
1607 {
1608 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1609 xfered_len);
1610
1611 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1612 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1613 }
1614 else
1615 {
1616 /* A large write request is likely to be partially satisfied
1617 by memory_xfer_partial_1. We will continually malloc
1618 and free a copy of the entire write request for breakpoint
1619 shadow handling even though we only end up writing a small
1620 subset of it. Cap writes to a limit specified by the target
1621 to mitigate this. */
1622 len = std::min (ops->get_memory_xfer_limit (), len);
1623
1624 gdb::byte_vector buf (writebuf, writebuf + len);
1625 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1626 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1627 xfered_len);
1628 }
1629
1630 return res;
1631 }
1632
1633 scoped_restore_tmpl<int>
1634 make_scoped_restore_show_memory_breakpoints (int show)
1635 {
1636 return make_scoped_restore (&show_memory_breakpoints, show);
1637 }
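/* Illustrative usage sketch: code that needs to see the breakpoint
   instructions actually present in memory, rather than the shadowed
   original contents, temporarily raises the flag:

     {
       scoped_restore_tmpl<int> restore
	 = make_scoped_restore_show_memory_breakpoints (1);
       ... memory reads now return the raw contents ...
     }

   The previous setting is restored when the scope is left.  */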
1638
1639 /* For docs see target.h, to_xfer_partial. */
1640
1641 enum target_xfer_status
1642 target_xfer_partial (struct target_ops *ops,
1643 enum target_object object, const char *annex,
1644 gdb_byte *readbuf, const gdb_byte *writebuf,
1645 ULONGEST offset, ULONGEST len,
1646 ULONGEST *xfered_len)
1647 {
1648 enum target_xfer_status retval;
1649
1650 /* Transfer is done when LEN is zero. */
1651 if (len == 0)
1652 return TARGET_XFER_EOF;
1653
1654 if (writebuf && !may_write_memory)
1655 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1656 core_addr_to_string_nz (offset), plongest (len));
1657
1658 *xfered_len = 0;
1659
1660 /* If this is a memory transfer, let the memory-specific code
1661 have a look at it instead. Memory transfers are more
1662 complicated. */
1663 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1664 || object == TARGET_OBJECT_CODE_MEMORY)
1665 retval = memory_xfer_partial (ops, object, readbuf,
1666 writebuf, offset, len, xfered_len);
1667 else if (object == TARGET_OBJECT_RAW_MEMORY)
1668 {
1669 /* Skip/avoid accessing the target if the memory region
1670 attributes block the access. Check this here instead of in
1671 raw_memory_xfer_partial as otherwise we'd end up checking
1672	 this twice in the case where the memory_xfer_partial path is
1673	 taken; once before checking the dcache, and again in the
1674 tail call to raw_memory_xfer_partial. */
1675 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1676 NULL))
1677 return TARGET_XFER_E_IO;
1678
1679 /* Request the normal memory object from other layers. */
1680 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1681 xfered_len);
1682 }
1683 else
1684 retval = ops->xfer_partial (object, annex, readbuf,
1685 writebuf, offset, len, xfered_len);
1686
1687 if (targetdebug)
1688 {
1689 const unsigned char *myaddr = NULL;
1690
1691 gdb_printf (gdb_stdlog,
1692 "%s:target_xfer_partial "
1693 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1694 ops->shortname (),
1695 (int) object,
1696 (annex ? annex : "(null)"),
1697 host_address_to_string (readbuf),
1698 host_address_to_string (writebuf),
1699 core_addr_to_string_nz (offset),
1700 pulongest (len), retval,
1701 pulongest (*xfered_len));
1702
1703 if (readbuf)
1704 myaddr = readbuf;
1705 if (writebuf)
1706 myaddr = writebuf;
1707 if (retval == TARGET_XFER_OK && myaddr != NULL)
1708 {
1709 int i;
1710
1711 gdb_puts (", bytes =", gdb_stdlog);
1712 for (i = 0; i < *xfered_len; i++)
1713 {
1714 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1715 {
1716 if (targetdebug < 2 && i > 0)
1717 {
1718 gdb_printf (gdb_stdlog, " ...");
1719 break;
1720 }
1721 gdb_printf (gdb_stdlog, "\n");
1722 }
1723
1724 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1725 }
1726 }
1727
1728 gdb_putc ('\n', gdb_stdlog);
1729 }
1730
1731 /* Check implementations of to_xfer_partial update *XFERED_LEN
1732 properly. Do assertion after printing debug messages, so that we
1733 can find more clues on assertion failure from debugging messages. */
1734 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1735 gdb_assert (*xfered_len > 0);
1736
1737 return retval;
1738 }
1739
1740 /* Read LEN bytes of target memory at address MEMADDR, placing the
1741 results in GDB's memory at MYADDR. Returns either 0 for success or
1742 -1 if any error occurs.
1743
1744 If an error occurs, no guarantee is made about the contents of the data at
1745 MYADDR. In particular, the caller should not depend upon partial reads
1746 filling the buffer with good data. There is no way for the caller to know
1747    how much good data might have been transferred anyway.  Callers that can
1748 deal with partial reads should call target_read (which will retry until
1749 it makes no progress, and then return how much was transferred). */
1750
1751 int
1752 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1753 {
1754 if (target_read (current_inferior ()->top_target (),
1755 TARGET_OBJECT_MEMORY, NULL,
1756 myaddr, memaddr, len) == len)
1757 return 0;
1758 else
1759 return -1;
1760 }
1761
1762 /* See target/target.h. */
1763
1764 int
1765 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1766 {
1767 gdb_byte buf[4];
1768 int r;
1769
1770 r = target_read_memory (memaddr, buf, sizeof buf);
1771 if (r != 0)
1772 return r;
1773 *result = extract_unsigned_integer
1774 (buf, sizeof buf,
1775 gdbarch_byte_order (current_inferior ()->arch ()));
1776 return 0;
1777 }
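/* Illustrative usage sketch (MEMADDR is hypothetical):

     uint32_t value;
     if (target_read_uint32 (memaddr, &value) != 0)
       ... handle the failed read ...
*/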
1778
1779 /* Like target_read_memory, but specify explicitly that this is a read
1780 from the target's raw memory. That is, this read bypasses the
1781 dcache, breakpoint shadowing, etc. */
1782
1783 int
1784 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1785 {
1786 if (target_read (current_inferior ()->top_target (),
1787 TARGET_OBJECT_RAW_MEMORY, NULL,
1788 myaddr, memaddr, len) == len)
1789 return 0;
1790 else
1791 return -1;
1792 }
1793
1794 /* Like target_read_memory, but specify explicitly that this is a read from
1795 the target's stack. This may trigger different cache behavior. */
1796
1797 int
1798 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1799 {
1800 if (target_read (current_inferior ()->top_target (),
1801 TARGET_OBJECT_STACK_MEMORY, NULL,
1802 myaddr, memaddr, len) == len)
1803 return 0;
1804 else
1805 return -1;
1806 }
1807
1808 /* Like target_read_memory, but specify explicitly that this is a read from
1809 the target's code. This may trigger different cache behavior. */
1810
1811 int
1812 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1813 {
1814 if (target_read (current_inferior ()->top_target (),
1815 TARGET_OBJECT_CODE_MEMORY, NULL,
1816 myaddr, memaddr, len) == len)
1817 return 0;
1818 else
1819 return -1;
1820 }
1821
1822 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1823 Returns either 0 for success or -1 if any error occurs. If an
1824 error occurs, no guarantee is made about how much data got written.
1825 Callers that can deal with partial writes should call
1826 target_write. */
1827
1828 int
1829 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1830 {
1831 if (target_write (current_inferior ()->top_target (),
1832 TARGET_OBJECT_MEMORY, NULL,
1833 myaddr, memaddr, len) == len)
1834 return 0;
1835 else
1836 return -1;
1837 }
1838
1839 /* Write LEN bytes from MYADDR to target raw memory at address
1840 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1841 If an error occurs, no guarantee is made about how much data got
1842 written. Callers that can deal with partial writes should call
1843 target_write. */
1844
1845 int
1846 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1847 {
1848 if (target_write (current_inferior ()->top_target (),
1849 TARGET_OBJECT_RAW_MEMORY, NULL,
1850 myaddr, memaddr, len) == len)
1851 return 0;
1852 else
1853 return -1;
1854 }
1855
1856 /* Fetch the target's memory map. */
1857
1858 std::vector<mem_region>
1859 target_memory_map (void)
1860 {
1861 target_ops *target = current_inferior ()->top_target ();
1862 std::vector<mem_region> result = target->memory_map ();
1863 if (result.empty ())
1864 return result;
1865
1866 std::sort (result.begin (), result.end ());
1867
1868 /* Check that regions do not overlap. Simultaneously assign
1869 a numbering for the "mem" commands to use to refer to
1870 each region. */
1871 mem_region *last_one = NULL;
1872 for (size_t ix = 0; ix < result.size (); ix++)
1873 {
1874 mem_region *this_one = &result[ix];
1875 this_one->number = ix;
1876
1877 if (last_one != NULL && last_one->hi > this_one->lo)
1878 {
1879 warning (_("Overlapping regions in memory map: ignoring"));
1880 return std::vector<mem_region> ();
1881 }
1882
1883 last_one = this_one;
1884 }
1885
1886 return result;
1887 }
1888
1889 void
1890 target_flash_erase (ULONGEST address, LONGEST length)
1891 {
1892 current_inferior ()->top_target ()->flash_erase (address, length);
1893 }
1894
1895 void
1896 target_flash_done (void)
1897 {
1898 current_inferior ()->top_target ()->flash_done ();
1899 }
1900
1901 static void
1902 show_trust_readonly (struct ui_file *file, int from_tty,
1903 struct cmd_list_element *c, const char *value)
1904 {
1905 gdb_printf (file,
1906 _("Mode for reading from readonly sections is %s.\n"),
1907 value);
1908 }
1909
1910 /* Target vector read/write partial wrapper functions. */
1911
1912 static enum target_xfer_status
1913 target_read_partial (struct target_ops *ops,
1914 enum target_object object,
1915 const char *annex, gdb_byte *buf,
1916 ULONGEST offset, ULONGEST len,
1917 ULONGEST *xfered_len)
1918 {
1919 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1920 xfered_len);
1921 }
1922
1923 static enum target_xfer_status
1924 target_write_partial (struct target_ops *ops,
1925 enum target_object object,
1926 const char *annex, const gdb_byte *buf,
1927 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1928 {
1929 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1930 xfered_len);
1931 }
1932
1933 /* Wrappers to perform the full transfer. */
1934
1935 /* For docs on target_read see target.h. */
1936
1937 LONGEST
1938 target_read (struct target_ops *ops,
1939 enum target_object object,
1940 const char *annex, gdb_byte *buf,
1941 ULONGEST offset, LONGEST len)
1942 {
1943 LONGEST xfered_total = 0;
1944 int unit_size = 1;
1945
1946 /* If we are reading from a memory object, find the length of an addressable
1947 unit for that architecture. */
1948 if (object == TARGET_OBJECT_MEMORY
1949 || object == TARGET_OBJECT_STACK_MEMORY
1950 || object == TARGET_OBJECT_CODE_MEMORY
1951 || object == TARGET_OBJECT_RAW_MEMORY)
1952 unit_size = gdbarch_addressable_memory_unit_size
1953 (current_inferior ()->arch ());
1954
1955 while (xfered_total < len)
1956 {
1957 ULONGEST xfered_partial;
1958 enum target_xfer_status status;
1959
1960 status = target_read_partial (ops, object, annex,
1961 buf + xfered_total * unit_size,
1962 offset + xfered_total, len - xfered_total,
1963 &xfered_partial);
1964
1965 /* Call an observer, notifying them of the xfer progress? */
1966 if (status == TARGET_XFER_EOF)
1967 return xfered_total;
1968 else if (status == TARGET_XFER_OK)
1969 {
1970 xfered_total += xfered_partial;
1971 QUIT;
1972 }
1973 else
1974 return TARGET_XFER_E_IO;
1975
1976 }
1977 return len;
1978 }
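/* Descriptive note: unlike a single partial transfer, target_read
   retries until LEN bytes have been transferred, the target reports
   end-of-file, or an error occurs.  Callers therefore normally just
   compare the return value against the requested length, e.g.:

     if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		      buf, memaddr, len) != len)
       ... treat as a read failure ...
*/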
1979
1980 /* Assuming that the entire [begin, end) range of memory cannot be
1981 read, try to read whatever subrange is possible to read.
1982
1983 The function returns, in RESULT, either zero or one memory block.
1984 If there's a readable subrange at the beginning, it is completely
1985 read and returned. Any further readable subrange will not be read.
1986 Otherwise, if there's a readable subrange at the end, it will be
1987 completely read and returned. Any readable subranges before it
1988 (obviously, not starting at the beginning), will be ignored. In
1989    other cases -- either no readable subrange, or readable subrange(s)
1990    that are neither at the beginning nor the end -- nothing is returned.
1991
1992 The purpose of this function is to handle a read across a boundary
1993 of accessible memory in a case when memory map is not available.
1994 The above restrictions are fine for this case, but will give
1995 incorrect results if the memory is 'patchy'. However, supporting
1996 'patchy' memory would require trying to read every single byte,
1997 and that seems an unacceptable solution. An explicit memory map is
1998 recommended for this case -- and read_memory_robust will
1999 take care of reading multiple ranges then. */
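/* For example, with [0x1000, 0x1800) readable and [0x1800, 0x2000) not,
a request for [0x1000, 0x2000) finds the first byte readable, bisects
forward through the unreadable remainder, and ends up returning the
single block [0x1000, 0x1800). The addresses here are only illustrative. */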
2000
2001 static void
2002 read_whatever_is_readable (struct target_ops *ops,
2003 const ULONGEST begin, const ULONGEST end,
2004 int unit_size,
2005 std::vector<memory_read_result> *result)
2006 {
2007 ULONGEST current_begin = begin;
2008 ULONGEST current_end = end;
2009 int forward;
2010 ULONGEST xfered_len;
2011
2012 /* If we previously failed to read 1 byte, nothing can be done here. */
2013 if (end - begin <= 1)
2014 return;
2015
2016 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2017
2018 /* Check that either the first or the last byte is readable, and give up
2019 if not. This heuristic is meant to permit reading accessible memory
2020 at the boundary of an accessible region. */
2021 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2022 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2023 {
2024 forward = 1;
2025 ++current_begin;
2026 }
2027 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2028 buf.get () + (end - begin) - 1, end - 1, 1,
2029 &xfered_len) == TARGET_XFER_OK)
2030 {
2031 forward = 0;
2032 --current_end;
2033 }
2034 else
2035 return;
2036
2037 /* The loop invariant is that [current_begin, current_end) was previously
2038 found to be not readable as a whole.
2039
2040 Note loop condition -- if the range has 1 byte, we can't divide the range
2041 so there's no point trying further. */
2042 while (current_end - current_begin > 1)
2043 {
2044 ULONGEST first_half_begin, first_half_end;
2045 ULONGEST second_half_begin, second_half_end;
2046 LONGEST xfer;
2047 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2048
2049 if (forward)
2050 {
2051 first_half_begin = current_begin;
2052 first_half_end = middle;
2053 second_half_begin = middle;
2054 second_half_end = current_end;
2055 }
2056 else
2057 {
2058 first_half_begin = middle;
2059 first_half_end = current_end;
2060 second_half_begin = current_begin;
2061 second_half_end = middle;
2062 }
2063
2064 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2065 buf.get () + (first_half_begin - begin) * unit_size,
2066 first_half_begin,
2067 first_half_end - first_half_begin);
2068
2069 if (xfer == first_half_end - first_half_begin)
2070 {
2071 /* This half reads fine. So the error must be in the
2072 other half. */
2073 current_begin = second_half_begin;
2074 current_end = second_half_end;
2075 }
2076 else
2077 {
2078 /* This half is not readable. Because we've tried one byte, we
2079 know some part of this half is actually readable. Go to the next
2080 iteration to divide again and try to read.
2081
2082 We don't handle the other half, because this function only tries
2083 to read a single readable subrange. */
2084 current_begin = first_half_begin;
2085 current_end = first_half_end;
2086 }
2087 }
2088
2089 if (forward)
2090 {
2091 /* The [begin, current_begin) range has been read. */
2092 result->emplace_back (begin, current_begin, std::move (buf));
2093 }
2094 else
2095 {
2096 /* The [current_end, end) range has been read. */
2097 LONGEST region_len = end - current_end;
2098
2099 gdb::unique_xmalloc_ptr<gdb_byte> data
2100 ((gdb_byte *) xmalloc (region_len * unit_size));
2101 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2102 region_len * unit_size);
2103 result->emplace_back (current_end, end, std::move (data));
2104 }
2105 }
2106
2107 std::vector<memory_read_result>
2108 read_memory_robust (struct target_ops *ops,
2109 const ULONGEST offset, const LONGEST len)
2110 {
2111 std::vector<memory_read_result> result;
2112 int unit_size
2113 = gdbarch_addressable_memory_unit_size (current_inferior ()->arch ());
2114
2115 LONGEST xfered_total = 0;
2116 while (xfered_total < len)
2117 {
2118 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2119 LONGEST region_len;
2120
2121 /* If there is no explicit region, a fake one should be created. */
2122 gdb_assert (region);
2123
2124 if (region->hi == 0)
2125 region_len = len - xfered_total;
2126 else
2127 region_len = region->hi - offset;
2128
2129 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2130 {
2131 /* Cannot read this region. Note that we can end up here only
2132 if the region is explicitly marked inaccessible, or
2133 'inaccessible-by-default' is in effect. */
2134 xfered_total += region_len;
2135 }
2136 else
2137 {
2138 LONGEST to_read = std::min (len - xfered_total, region_len);
2139 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2140 ((gdb_byte *) xmalloc (to_read * unit_size));
2141
2142 LONGEST xfered_partial =
2143 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2144 offset + xfered_total, to_read);
2145 /* Call an observer, notifying them of the xfer progress? */
2146 if (xfered_partial <= 0)
2147 {
2148 /* Got an error reading full chunk. See if maybe we can read
2149 some subrange. */
2150 read_whatever_is_readable (ops, offset + xfered_total,
2151 offset + xfered_total + to_read,
2152 unit_size, &result);
2153 xfered_total += to_read;
2154 }
2155 else
2156 {
2157 result.emplace_back (offset + xfered_total,
2158 offset + xfered_total + xfered_partial,
2159 std::move (buffer));
2160 xfered_total += xfered_partial;
2161 }
2162 QUIT;
2163 }
2164 }
2165
2166 return result;
2167 }
2168
2169
2170 /* An alternative to target_write with progress callbacks. */
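/* When PROGRESS is non-NULL it is called once with 0 before any data is
transferred, and then once after each successful partial write with the
number of units just written. */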
2171
2172 LONGEST
2173 target_write_with_progress (struct target_ops *ops,
2174 enum target_object object,
2175 const char *annex, const gdb_byte *buf,
2176 ULONGEST offset, LONGEST len,
2177 void (*progress) (ULONGEST, void *), void *baton)
2178 {
2179 LONGEST xfered_total = 0;
2180 int unit_size = 1;
2181
2182 /* If we are writing to a memory object, find the length of an addressable
2183 unit for that architecture. */
2184 if (object == TARGET_OBJECT_MEMORY
2185 || object == TARGET_OBJECT_STACK_MEMORY
2186 || object == TARGET_OBJECT_CODE_MEMORY
2187 || object == TARGET_OBJECT_RAW_MEMORY)
2188 unit_size = gdbarch_addressable_memory_unit_size
2189 (current_inferior ()->arch ());
2190
2191 /* Give the progress callback a chance to set up. */
2192 if (progress)
2193 (*progress) (0, baton);
2194
2195 while (xfered_total < len)
2196 {
2197 ULONGEST xfered_partial;
2198 enum target_xfer_status status;
2199
2200 status = target_write_partial (ops, object, annex,
2201 buf + xfered_total * unit_size,
2202 offset + xfered_total, len - xfered_total,
2203 &xfered_partial);
2204
2205 if (status != TARGET_XFER_OK)
2206 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2207
2208 if (progress)
2209 (*progress) (xfered_partial, baton);
2210
2211 xfered_total += xfered_partial;
2212 QUIT;
2213 }
2214 return len;
2215 }
2216
2217 /* For docs on target_write see target.h. */
2218
2219 LONGEST
2220 target_write (struct target_ops *ops,
2221 enum target_object object,
2222 const char *annex, const gdb_byte *buf,
2223 ULONGEST offset, LONGEST len)
2224 {
2225 return target_write_with_progress (ops, object, annex, buf, offset, len,
2226 NULL, NULL);
2227 }
2228
2229 /* Help for target_read_alloc and target_read_stralloc. See their comments
2230 for details. */
2231
2232 template <typename T>
2233 gdb::optional<gdb::def_vector<T>>
2234 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2235 const char *annex)
2236 {
2237 gdb::def_vector<T> buf;
2238 size_t buf_pos = 0;
2239 const int chunk = 4096;
2240
2241 /* This function does not have a length parameter; it reads the
2242 entire OBJECT. Also, it doesn't support objects fetched partly
2243 from one target and partly from another (in a different stratum,
2244 e.g. a core file and an executable). Both reasons make it
2245 unsuitable for reading memory. */
2246 gdb_assert (object != TARGET_OBJECT_MEMORY);
2247
2248 /* Start by reading up to 4K at a time. The target will throttle
2249 this number down if necessary. */
2250 while (1)
2251 {
2252 ULONGEST xfered_len;
2253 enum target_xfer_status status;
2254
2255 buf.resize (buf_pos + chunk);
2256
2257 status = target_read_partial (ops, object, annex,
2258 (gdb_byte *) &buf[buf_pos],
2259 buf_pos, chunk,
2260 &xfered_len);
2261
2262 if (status == TARGET_XFER_EOF)
2263 {
2264 /* Read all there was. */
2265 buf.resize (buf_pos);
2266 return buf;
2267 }
2268 else if (status != TARGET_XFER_OK)
2269 {
2270 /* An error occurred. */
2271 return {};
2272 }
2273
2274 buf_pos += xfered_len;
2275
2276 QUIT;
2277 }
2278 }
2279
2280 /* See target.h */
2281
2282 gdb::optional<gdb::byte_vector>
2283 target_read_alloc (struct target_ops *ops, enum target_object object,
2284 const char *annex)
2285 {
2286 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2287 }
2288
2289 /* See target.h. */
2290
2291 gdb::optional<gdb::char_vector>
2292 target_read_stralloc (struct target_ops *ops, enum target_object object,
2293 const char *annex)
2294 {
2295 gdb::optional<gdb::char_vector> buf
2296 = target_read_alloc_1<char> (ops, object, annex);
2297
2298 if (!buf)
2299 return {};
2300
2301 if (buf->empty () || buf->back () != '\0')
2302 buf->push_back ('\0');
2303
2304 /* Check for embedded NUL bytes; but allow trailing NULs. */
2305 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2306 it != buf->end (); it++)
2307 if (*it != '\0')
2308 {
2309 warning (_("target object %d, annex %s, "
2310 "contained unexpected null characters"),
2311 (int) object, annex ? annex : "(none)");
2312 break;
2313 }
2314
2315 return buf;
2316 }
2317
2318 /* Memory transfer methods. */
2319
2320 void
2321 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2322 LONGEST len)
2323 {
2324 /* This method is used to read from an alternate, non-current
2325 target. This read must bypass the overlay support (as symbols
2326 don't match this target), and GDB's internal cache (wrong cache
2327 for this target). */
2328 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2329 != len)
2330 memory_error (TARGET_XFER_E_IO, addr);
2331 }
2332
2333 ULONGEST
2334 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2335 int len, enum bfd_endian byte_order)
2336 {
2337 gdb_byte buf[sizeof (ULONGEST)];
2338
2339 gdb_assert (len <= sizeof (buf));
2340 get_target_memory (ops, addr, buf, len);
2341 return extract_unsigned_integer (buf, len, byte_order);
2342 }
2343
2344 /* See target.h. */
2345
2346 int
2347 target_insert_breakpoint (struct gdbarch *gdbarch,
2348 struct bp_target_info *bp_tgt)
2349 {
2350 if (!may_insert_breakpoints)
2351 {
2352 warning (_("May not insert breakpoints"));
2353 return 1;
2354 }
2355
2356 target_ops *target = current_inferior ()->top_target ();
2357
2358 return target->insert_breakpoint (gdbarch, bp_tgt);
2359 }
2360
2361 /* See target.h. */
2362
2363 int
2364 target_remove_breakpoint (struct gdbarch *gdbarch,
2365 struct bp_target_info *bp_tgt,
2366 enum remove_bp_reason reason)
2367 {
2368 /* This is kind of a weird case to handle, but the permission might
2369 have been changed after breakpoints were inserted - in which case
2370 we should just take the user literally and assume that any
2371 breakpoints should be left in place. */
2372 if (!may_insert_breakpoints)
2373 {
2374 warning (_("May not remove breakpoints"));
2375 return 1;
2376 }
2377
2378 target_ops *target = current_inferior ()->top_target ();
2379
2380 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2381 }
2382
2383 static void
2384 info_target_command (const char *args, int from_tty)
2385 {
2386 int has_all_mem = 0;
2387
2388 if (current_program_space->symfile_object_file != NULL)
2389 {
2390 objfile *objf = current_program_space->symfile_object_file;
2391 gdb_printf (_("Symbols from \"%s\".\n"),
2392 objfile_name (objf));
2393 }
2394
2395 for (target_ops *t = current_inferior ()->top_target ();
2396 t != NULL;
2397 t = t->beneath ())
2398 {
2399 if (!t->has_memory ())
2400 continue;
2401
2402 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2403 continue;
2404 if (has_all_mem)
2405 gdb_printf (_("\tWhile running this, "
2406 "GDB does not access memory from...\n"));
2407 gdb_printf ("%s:\n", t->longname ());
2408 t->files_info ();
2409 has_all_mem = t->has_all_memory ();
2410 }
2411 }
2412
2413 /* This function is called before any new inferior is created, e.g.
2414 by running a program, attaching, or connecting to a target.
2415 It cleans up any state from previous invocations which might
2416 change between runs. This is a subset of what target_preopen
2417 resets (things which might change between targets). */
2418
2419 void
2420 target_pre_inferior (int from_tty)
2421 {
2422 /* Clear out solib state. Otherwise the solib state of the previous
2423 inferior might have survived and is entirely wrong for the new
2424 target. This has been observed on GNU/Linux using glibc 2.3. How
2425 to reproduce:
2426
2427 bash$ ./foo&
2428 [1] 4711
2429 bash$ ./foo&
2430 [2] 4712
2431 bash$ gdb ./foo
2432 [...]
2433 (gdb) attach 4711
2434 (gdb) detach
2435 (gdb) attach 4712
2436 Cannot access memory at address 0xdeadbeef
2437 */
2438
2439 /* In some OSs, the shared library list is the same/global/shared
2440 across inferiors. If code is shared between processes, so are
2441 memory regions and features. */
2442 if (!gdbarch_has_global_solist (current_inferior ()->arch ()))
2443 {
2444 no_shared_libraries (NULL, from_tty);
2445
2446 invalidate_target_mem_regions ();
2447
2448 target_clear_description ();
2449 }
2450
2451 /* attach_flag may be set if the previous process associated with
2452 the inferior was attached to. */
2453 current_inferior ()->attach_flag = false;
2454
2455 current_inferior ()->highest_thread_num = 0;
2456
2457 update_previous_thread ();
2458
2459 agent_capability_invalidate ();
2460 }
2461
2462 /* This is to be called by the open routine before it does
2463 anything. */
2464
2465 void
2466 target_preopen (int from_tty)
2467 {
2468 dont_repeat ();
2469
2470 if (current_inferior ()->pid != 0)
2471 {
2472 if (!from_tty
2473 || !target_has_execution ()
2474 || query (_("A program is being debugged already. Kill it? ")))
2475 {
2476 /* Core inferiors actually should be detached, not
2477 killed. */
2478 if (target_has_execution ())
2479 target_kill ();
2480 else
2481 target_detach (current_inferior (), 0);
2482 }
2483 else
2484 error (_("Program not killed."));
2485 }
2486
2487 /* Release reference to old previous thread. */
2488 update_previous_thread ();
2489
2490 /* Calling target_kill may remove the target from the stack. But if
2491 it doesn't (which seems like a win for UDI), remove it now. */
2492 /* Leave the exec target, though. The user may be switching from a
2493 live process to a core of the same program. */
2494 current_inferior ()->pop_all_targets_above (file_stratum);
2495
2496 target_pre_inferior (from_tty);
2497 }
2498
2499 /* See target.h. */
2500
2501 void
2502 target_detach (inferior *inf, int from_tty)
2503 {
2504 /* Threads don't need to be resumed until the end of this function. */
2505 scoped_disable_commit_resumed disable_commit_resumed ("detaching");
2506
2507 /* After we have detached, we will clear the register cache for this inferior
2508 by calling registers_changed_ptid. We must save the pid_ptid before
2509 detaching, as the target detach method will clear inf->pid. */
2510 ptid_t save_pid_ptid = ptid_t (inf->pid);
2511
2512 /* As long as some to_detach implementations rely on current_inferior
2513 (either directly, or indirectly, like through reading memory), INF needs
2514 to be the current inferior. Once that requirement no longer holds,
2515 this assertion can be removed. */
2516 gdb_assert (inf == current_inferior ());
2517
2518 prepare_for_detach ();
2519
2520 gdb::observers::inferior_pre_detach.notify (inf);
2521
2522 /* Hold a strong reference because detaching may unpush the
2523 target. */
2524 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2525
2526 current_inferior ()->top_target ()->detach (inf, from_tty);
2527
2528 process_stratum_target *proc_target
2529 = as_process_stratum_target (proc_target_ref.get ());
2530
2531 registers_changed_ptid (proc_target, save_pid_ptid);
2532
2533 /* We have to ensure we have no frame cache left. Normally,
2534 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2535 inferior_ptid matches save_pid_ptid, but in our case, it does not
2536 call it, as inferior_ptid has been reset. */
2537 reinit_frame_cache ();
2538
2539 disable_commit_resumed.reset_and_commit ();
2540 }
2541
2542 void
2543 target_disconnect (const char *args, int from_tty)
2544 {
2545 /* If we're in breakpoints-always-inserted mode or if breakpoints
2546 are global across processes, we have to remove them before
2547 disconnecting. */
2548 remove_breakpoints ();
2549
2550 current_inferior ()->top_target ()->disconnect (args, from_tty);
2551 }
2552
2553 /* See target/target.h. */
2554
2555 ptid_t
2556 target_wait (ptid_t ptid, struct target_waitstatus *status,
2557 target_wait_flags options)
2558 {
2559 target_ops *target = current_inferior ()->top_target ();
2560 process_stratum_target *proc_target = current_inferior ()->process_target ();
2561
2562 gdb_assert (!proc_target->commit_resumed_state);
2563
2564 if (!target_can_async_p (target))
2565 gdb_assert ((options & TARGET_WNOHANG) == 0);
2566
2567 try
2568 {
2569 gdb::observers::target_pre_wait.notify (ptid);
2570 ptid_t event_ptid = target->wait (ptid, status, options);
2571 gdb::observers::target_post_wait.notify (event_ptid);
2572 return event_ptid;
2573 }
2574 catch (...)
2575 {
2576 gdb::observers::target_post_wait.notify (null_ptid);
2577 throw;
2578 }
2579 }
2580
2581 /* See target.h. */
2582
2583 ptid_t
2584 default_target_wait (struct target_ops *ops,
2585 ptid_t ptid, struct target_waitstatus *status,
2586 target_wait_flags options)
2587 {
2588 status->set_ignore ();
2589 return minus_one_ptid;
2590 }
2591
2592 std::string
2593 target_pid_to_str (ptid_t ptid)
2594 {
2595 return current_inferior ()->top_target ()->pid_to_str (ptid);
2596 }
2597
2598 const char *
2599 target_thread_name (struct thread_info *info)
2600 {
2601 gdb_assert (info->inf == current_inferior ());
2602
2603 return current_inferior ()->top_target ()->thread_name (info);
2604 }
2605
2606 struct thread_info *
2607 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2608 int handle_len,
2609 struct inferior *inf)
2610 {
2611 target_ops *target = current_inferior ()->top_target ();
2612
2613 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2614 }
2615
2616 /* See target.h. */
2617
2618 gdb::array_view<const gdb_byte>
2619 target_thread_info_to_thread_handle (struct thread_info *tip)
2620 {
2621 target_ops *target = current_inferior ()->top_target ();
2622
2623 return target->thread_info_to_thread_handle (tip);
2624 }
2625
2626 void
2627 target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
2628 {
2629 process_stratum_target *curr_target = current_inferior ()->process_target ();
2630 gdb_assert (!curr_target->commit_resumed_state);
2631
2632 gdb_assert (inferior_ptid != null_ptid);
2633 gdb_assert (inferior_ptid.matches (scope_ptid));
2634
2635 target_dcache_invalidate ();
2636
2637 current_inferior ()->top_target ()->resume (scope_ptid, step, signal);
2638
2639 registers_changed_ptid (curr_target, scope_ptid);
2640 /* We only set the internal executing state here. The user/frontend
2641 running state is set at a higher level. This also clears the
2642 thread's stop_pc as a side effect. */
2643 set_executing (curr_target, scope_ptid, true);
2644 clear_inline_frame_state (curr_target, scope_ptid);
2645
2646 if (target_can_async_p ())
2647 target_async (true);
2648 }
2649
2650 /* See target.h. */
2651
2652 void
2653 target_commit_resumed ()
2654 {
2655 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2656 current_inferior ()->top_target ()->commit_resumed ();
2657 }
2658
2659 /* See target.h. */
2660
2661 bool
2662 target_has_pending_events ()
2663 {
2664 return current_inferior ()->top_target ()->has_pending_events ();
2665 }
2666
2667 void
2668 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2669 {
2670 current_inferior ()->top_target ()->pass_signals (pass_signals);
2671 }
2672
2673 void
2674 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2675 {
2676 current_inferior ()->top_target ()->program_signals (program_signals);
2677 }
2678
2679 static void
2680 default_follow_fork (struct target_ops *self, inferior *child_inf,
2681 ptid_t child_ptid, target_waitkind fork_kind,
2682 bool follow_child, bool detach_fork)
2683 {
2684 /* Some target returned a fork event, but did not know how to follow it. */
2685 internal_error (_("could not find a target to follow fork"));
2686 }
2687
2688 static void
2689 default_follow_clone (struct target_ops *self, ptid_t child_ptid)
2690 {
2691 /* Some target returned a clone event, but did not know how to follow it. */
2692 internal_error (_("could not find a target to follow clone"));
2693 }
2694
2695 /* See target.h. */
2696
2697 void
2698 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2699 target_waitkind fork_kind, bool follow_child,
2700 bool detach_fork)
2701 {
2702 target_ops *target = current_inferior ()->top_target ();
2703
2704 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2705 DETACH_FORK. */
2706 if (child_inf != nullptr)
2707 {
2708 gdb_assert (follow_child || !detach_fork);
2709 gdb_assert (child_inf->pid == child_ptid.pid ());
2710 }
2711 else
2712 gdb_assert (!follow_child && detach_fork);
2713
2714 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2715 detach_fork);
2716 }
2717
2718 /* See target.h. */
2719
2720 void
2721 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2722 const char *execd_pathname)
2723 {
2724 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2725 execd_pathname);
2726 }
2727
2728 static void
2729 default_mourn_inferior (struct target_ops *self)
2730 {
2731 internal_error (_("could not find a target to mourn the inferior"));
2732 }
2733
2734 void
2735 target_mourn_inferior (ptid_t ptid)
2736 {
2737 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2738 current_inferior ()->top_target ()->mourn_inferior ();
2739
2740 /* We no longer need to keep handles on any of the object files.
2741 Make sure to release them to avoid unnecessarily locking any
2742 of them while we're not actually debugging. */
2743 bfd_cache_close_all ();
2744 }
2745
2746 /* Look for a target which can describe architectural features, starting
2747 from TARGET. If we find one, return its description. */
2748
2749 const struct target_desc *
2750 target_read_description (struct target_ops *target)
2751 {
2752 return target->read_description ();
2753 }
2754
2755
2756 /* Default implementation of memory-searching. */
2757
2758 static int
2759 default_search_memory (struct target_ops *self,
2760 CORE_ADDR start_addr, ULONGEST search_space_len,
2761 const gdb_byte *pattern, ULONGEST pattern_len,
2762 CORE_ADDR *found_addrp)
2763 {
2764 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2765 {
2766 return target_read (current_inferior ()->top_target (),
2767 TARGET_OBJECT_MEMORY, NULL,
2768 result, addr, len) == len;
2769 };
2770
2771 /* Start over from the top of the target stack. */
2772 return simple_search_memory (read_memory, start_addr, search_space_len,
2773 pattern, pattern_len, found_addrp);
2774 }
2775
2776 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2777 sequence of bytes in PATTERN with length PATTERN_LEN.
2778
2779 The result is 1 if found, 0 if not found, and -1 if there was an error
2780 requiring halting of the search (e.g. memory read error).
2781 If the pattern is found the address is recorded in FOUND_ADDRP. */
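/* A minimal usage sketch; START and SPACE_LEN stand for the caller's
search bounds and are hypothetical:

static const gdb_byte magic[] = { 0xde, 0xad, 0xbe, 0xef };
CORE_ADDR found;

if (target_search_memory (start, space_len, magic, sizeof (magic),
&found) == 1)
... the pattern was found at FOUND ... */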
2782
2783 int
2784 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2785 const gdb_byte *pattern, ULONGEST pattern_len,
2786 CORE_ADDR *found_addrp)
2787 {
2788 target_ops *target = current_inferior ()->top_target ();
2789
2790 return target->search_memory (start_addr, search_space_len, pattern,
2791 pattern_len, found_addrp);
2792 }
2793
2794 /* Look through the currently pushed targets. If none of them will
2795 be able to restart the currently running process, issue an error
2796 message. */
2797
2798 void
2799 target_require_runnable (void)
2800 {
2801 for (target_ops *t = current_inferior ()->top_target ();
2802 t != NULL;
2803 t = t->beneath ())
2804 {
2805 /* If this target knows how to create a new program, then
2806 assume we will still be able to after killing the current
2807 one. Either killing and mourning will not pop T, or else
2808 find_default_run_target will find it again. */
2809 if (t->can_create_inferior ())
2810 return;
2811
2812 /* Do not worry about targets at certain strata that cannot
2813 create inferiors. Assume they will be pushed again if
2814 necessary, and continue to the process_stratum. */
2815 if (t->stratum () > process_stratum)
2816 continue;
2817
2818 error (_("The \"%s\" target does not support \"run\". "
2819 "Try \"help target\" or \"continue\"."),
2820 t->shortname ());
2821 }
2822
2823 /* This function is only called if the target is running. In that
2824 case there should have been a process_stratum target and it
2825 should either know how to create inferiors, or not... */
2826 internal_error (_("No targets found"));
2827 }
2828
2829 /* Whether GDB is allowed to fall back to the default run target for
2830 "run", "attach", etc. when no target is connected yet. */
2831 static bool auto_connect_native_target = true;
2832
2833 static void
2834 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2835 struct cmd_list_element *c, const char *value)
2836 {
2837 gdb_printf (file,
2838 _("Whether GDB may automatically connect to the "
2839 "native target is %s.\n"),
2840 value);
2841 }
2842
2843 /* A pointer to the target that can respond to "run" or "attach".
2844 Native targets are always singletons and instantiated early at GDB
2845 startup. */
2846 static target_ops *the_native_target;
2847
2848 /* See target.h. */
2849
2850 void
2851 set_native_target (target_ops *target)
2852 {
2853 if (the_native_target != NULL)
2854 internal_error (_("native target already set (\"%s\")."),
2855 the_native_target->longname ());
2856
2857 the_native_target = target;
2858 }
2859
2860 /* See target.h. */
2861
2862 target_ops *
2863 get_native_target ()
2864 {
2865 return the_native_target;
2866 }
2867
2868 /* Look through the list of possible targets for a target that can
2869 execute a run or attach command without any other data. This is
2870 used to locate the default process stratum.
2871
2872 If DO_MESG is not NULL, the result is always valid (error() is
2873 called for errors); else, return NULL on error. */
2874
2875 static struct target_ops *
2876 find_default_run_target (const char *do_mesg)
2877 {
2878 if (auto_connect_native_target && the_native_target != NULL)
2879 return the_native_target;
2880
2881 if (do_mesg != NULL)
2882 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2883 return NULL;
2884 }
2885
2886 /* See target.h. */
2887
2888 struct target_ops *
2889 find_attach_target (void)
2890 {
2891 /* If a target on the current stack can attach, use it. */
2892 for (target_ops *t = current_inferior ()->top_target ();
2893 t != NULL;
2894 t = t->beneath ())
2895 {
2896 if (t->can_attach ())
2897 return t;
2898 }
2899
2900 /* Otherwise, use the default run target for attaching. */
2901 return find_default_run_target ("attach");
2902 }
2903
2904 /* See target.h. */
2905
2906 struct target_ops *
2907 find_run_target (void)
2908 {
2909 /* If a target on the current stack can run, use it. */
2910 for (target_ops *t = current_inferior ()->top_target ();
2911 t != NULL;
2912 t = t->beneath ())
2913 {
2914 if (t->can_create_inferior ())
2915 return t;
2916 }
2917
2918 /* Otherwise, use the default run target. */
2919 return find_default_run_target ("run");
2920 }
2921
2922 bool
2923 target_ops::info_proc (const char *args, enum info_proc_what what)
2924 {
2925 return false;
2926 }
2927
2928 /* Implement the "info proc" command. */
2929
2930 int
2931 target_info_proc (const char *args, enum info_proc_what what)
2932 {
2933 struct target_ops *t;
2934
2935 /* If we're already connected to something that can get us OS
2936 related data, use it. Otherwise, try using the native
2937 target. */
2938 t = find_target_at (process_stratum);
2939 if (t == NULL)
2940 t = find_default_run_target (NULL);
2941
2942 for (; t != NULL; t = t->beneath ())
2943 {
2944 if (t->info_proc (args, what))
2945 {
2946 if (targetdebug)
2947 gdb_printf (gdb_stdlog,
2948 "target_info_proc (\"%s\", %d)\n", args, what);
2949
2950 return 1;
2951 }
2952 }
2953
2954 return 0;
2955 }
2956
2957 static int
2958 find_default_supports_disable_randomization (struct target_ops *self)
2959 {
2960 struct target_ops *t;
2961
2962 t = find_default_run_target (NULL);
2963 if (t != NULL)
2964 return t->supports_disable_randomization ();
2965 return 0;
2966 }
2967
2968 int
2969 target_supports_disable_randomization (void)
2970 {
2971 return current_inferior ()->top_target ()->supports_disable_randomization ();
2972 }
2973
2974 /* See target/target.h. */
2975
2976 int
2977 target_supports_multi_process (void)
2978 {
2979 return current_inferior ()->top_target ()->supports_multi_process ();
2980 }
2981
2982 /* See target.h. */
2983
2984 gdb::optional<gdb::char_vector>
2985 target_get_osdata (const char *type)
2986 {
2987 struct target_ops *t;
2988
2989 /* If we're already connected to something that can get us OS
2990 related data, use it. Otherwise, try using the native
2991 target. */
2992 t = find_target_at (process_stratum);
2993 if (t == NULL)
2994 t = find_default_run_target ("get OS data");
2995
2996 if (!t)
2997 return {};
2998
2999 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3000 }
3001
3002 /* Determine the current address space of thread PTID. */
3003
3004 struct address_space *
3005 target_thread_address_space (ptid_t ptid)
3006 {
3007 struct address_space *aspace;
3008
3009 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3010 gdb_assert (aspace != NULL);
3011
3012 return aspace;
3013 }
3014
3015 /* See target.h. */
3016
3017 target_ops *
3018 target_ops::beneath () const
3019 {
3020 return current_inferior ()->find_target_beneath (this);
3021 }
3022
3023 void
3024 target_ops::close ()
3025 {
3026 }
3027
3028 bool
3029 target_ops::can_attach ()
3030 {
3031 return false;
3032 }
3033
3034 void
3035 target_ops::attach (const char *, int)
3036 {
3037 gdb_assert_not_reached ("target_ops::attach called");
3038 }
3039
3040 bool
3041 target_ops::can_create_inferior ()
3042 {
3043 return false;
3044 }
3045
3046 void
3047 target_ops::create_inferior (const char *, const std::string &,
3048 char **, int)
3049 {
3050 gdb_assert_not_reached ("target_ops::create_inferior called");
3051 }
3052
3053 bool
3054 target_ops::can_run ()
3055 {
3056 return false;
3057 }
3058
3059 int
3060 target_can_run ()
3061 {
3062 for (target_ops *t = current_inferior ()->top_target ();
3063 t != NULL;
3064 t = t->beneath ())
3065 {
3066 if (t->can_run ())
3067 return 1;
3068 }
3069
3070 return 0;
3071 }
3072
3073 /* Target file operations. */
3074
3075 static struct target_ops *
3076 default_fileio_target (void)
3077 {
3078 struct target_ops *t;
3079
3080 /* If we're already connected to something that can perform
3081 file I/O, use it. Otherwise, try using the native target. */
3082 t = find_target_at (process_stratum);
3083 if (t != NULL)
3084 return t;
3085 return find_default_run_target ("file I/O");
3086 }
3087
3088 /* File handle for target file operations. */
3089
3090 struct fileio_fh_t
3091 {
3092 /* The target on which this file is open. NULL if the target has
3093 since been closed while the handle is still open. */
3094 target_ops *target;
3095
3096 /* The file descriptor on the target. */
3097 int target_fd;
3098
3099 /* Check whether this fileio_fh_t represents a closed file. */
3100 bool is_closed ()
3101 {
3102 return target_fd < 0;
3103 }
3104 };
3105
3106 /* Vector of currently open file handles. The value returned by
3107 target_fileio_open and passed as the FD argument to other
3108 target_fileio_* functions is an index into this vector. This
3109 vector's entries are never freed; instead, files are marked as
3110 closed, and the handle becomes available for reuse. */
3111 static std::vector<fileio_fh_t> fileio_fhandles;
3112
3113 /* Index into fileio_fhandles of the lowest handle that might be
3114 closed. This permits handle reuse without searching the whole
3115 list each time a new file is opened. */
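/* For example, if handles 0, 1 and 2 are open and handle 1 is then
closed, lowest_closed_fd becomes 1 and the next open reuses slot 1
instead of growing the vector. */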
3116 static int lowest_closed_fd;
3117
3118 /* See target.h. */
3119
3120 void
3121 fileio_handles_invalidate_target (target_ops *targ)
3122 {
3123 for (fileio_fh_t &fh : fileio_fhandles)
3124 if (fh.target == targ)
3125 fh.target = NULL;
3126 }
3127
3128 /* Acquire a target fileio file descriptor. */
3129
3130 static int
3131 acquire_fileio_fd (target_ops *target, int target_fd)
3132 {
3133 /* Search for closed handles to reuse. */
3134 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3135 {
3136 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3137
3138 if (fh.is_closed ())
3139 break;
3140 }
3141
3142 /* Push a new handle if no closed handles were found. */
3143 if (lowest_closed_fd == fileio_fhandles.size ())
3144 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3145 else
3146 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3147
3148 /* Should no longer be marked closed. */
3149 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3150
3151 /* Return its index, and start the next lookup at
3152 the next index. */
3153 return lowest_closed_fd++;
3154 }
3155
3156 /* Release a target fileio file descriptor. */
3157
3158 static void
3159 release_fileio_fd (int fd, fileio_fh_t *fh)
3160 {
3161 fh->target_fd = -1;
3162 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3163 }
3164
3165 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3166
3167 static fileio_fh_t *
3168 fileio_fd_to_fh (int fd)
3169 {
3170 return &fileio_fhandles[fd];
3171 }
3172
3173
3174 /* Default implementations of file i/o methods. We don't want these
3175 to delegate automatically, because we need to know which target
3176 supported the method, in order to call it directly from within
3177 pread/pwrite, etc. */
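/* Callers such as target_fileio_open below walk the target stack and
treat FILEIO_ENOSYS as "try the next target beneath". */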
3178
3179 int
3180 target_ops::fileio_open (struct inferior *inf, const char *filename,
3181 int flags, int mode, int warn_if_slow,
3182 fileio_error *target_errno)
3183 {
3184 *target_errno = FILEIO_ENOSYS;
3185 return -1;
3186 }
3187
3188 int
3189 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3190 ULONGEST offset, fileio_error *target_errno)
3191 {
3192 *target_errno = FILEIO_ENOSYS;
3193 return -1;
3194 }
3195
3196 int
3197 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3198 ULONGEST offset, fileio_error *target_errno)
3199 {
3200 *target_errno = FILEIO_ENOSYS;
3201 return -1;
3202 }
3203
3204 int
3205 target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3206 {
3207 *target_errno = FILEIO_ENOSYS;
3208 return -1;
3209 }
3210
3211 int
3212 target_ops::fileio_close (int fd, fileio_error *target_errno)
3213 {
3214 *target_errno = FILEIO_ENOSYS;
3215 return -1;
3216 }
3217
3218 int
3219 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3220 fileio_error *target_errno)
3221 {
3222 *target_errno = FILEIO_ENOSYS;
3223 return -1;
3224 }
3225
3226 gdb::optional<std::string>
3227 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3228 fileio_error *target_errno)
3229 {
3230 *target_errno = FILEIO_ENOSYS;
3231 return {};
3232 }
3233
3234 /* See target.h. */
3235
3236 int
3237 target_fileio_open (struct inferior *inf, const char *filename,
3238 int flags, int mode, bool warn_if_slow, fileio_error *target_errno)
3239 {
3240 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3241 {
3242 int fd = t->fileio_open (inf, filename, flags, mode,
3243 warn_if_slow, target_errno);
3244
3245 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3246 continue;
3247
3248 if (fd < 0)
3249 fd = -1;
3250 else
3251 fd = acquire_fileio_fd (t, fd);
3252
3253 if (targetdebug)
3254 gdb_printf (gdb_stdlog,
3255 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3256 " = %d (%d)\n",
3257 inf == NULL ? 0 : inf->num,
3258 filename, flags, mode,
3259 warn_if_slow, fd,
3260 fd != -1 ? 0 : *target_errno);
3261 return fd;
3262 }
3263
3264 *target_errno = FILEIO_ENOSYS;
3265 return -1;
3266 }
3267
3268 /* See target.h. */
3269
3270 int
3271 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3272 ULONGEST offset, fileio_error *target_errno)
3273 {
3274 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3275 int ret = -1;
3276
3277 if (fh->is_closed ())
3278 *target_errno = FILEIO_EBADF;
3279 else if (fh->target == NULL)
3280 *target_errno = FILEIO_EIO;
3281 else
3282 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3283 len, offset, target_errno);
3284
3285 if (targetdebug)
3286 gdb_printf (gdb_stdlog,
3287 "target_fileio_pwrite (%d,...,%d,%s) "
3288 "= %d (%d)\n",
3289 fd, len, pulongest (offset),
3290 ret, ret != -1 ? 0 : *target_errno);
3291 return ret;
3292 }
3293
3294 /* See target.h. */
3295
3296 int
3297 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3298 ULONGEST offset, fileio_error *target_errno)
3299 {
3300 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3301 int ret = -1;
3302
3303 if (fh->is_closed ())
3304 *target_errno = FILEIO_EBADF;
3305 else if (fh->target == NULL)
3306 *target_errno = FILEIO_EIO;
3307 else
3308 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3309 len, offset, target_errno);
3310
3311 if (targetdebug)
3312 gdb_printf (gdb_stdlog,
3313 "target_fileio_pread (%d,...,%d,%s) "
3314 "= %d (%d)\n",
3315 fd, len, pulongest (offset),
3316 ret, ret != -1 ? 0 : *target_errno);
3317 return ret;
3318 }
3319
3320 /* See target.h. */
3321
3322 int
3323 target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3324 {
3325 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3326 int ret = -1;
3327
3328 if (fh->is_closed ())
3329 *target_errno = FILEIO_EBADF;
3330 else if (fh->target == NULL)
3331 *target_errno = FILEIO_EIO;
3332 else
3333 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3334
3335 if (targetdebug)
3336 gdb_printf (gdb_stdlog,
3337 "target_fileio_fstat (%d) = %d (%d)\n",
3338 fd, ret, ret != -1 ? 0 : *target_errno);
3339 return ret;
3340 }
3341
3342 /* See target.h. */
3343
3344 int
3345 target_fileio_close (int fd, fileio_error *target_errno)
3346 {
3347 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3348 int ret = -1;
3349
3350 if (fh->is_closed ())
3351 *target_errno = FILEIO_EBADF;
3352 else
3353 {
3354 if (fh->target != NULL)
3355 ret = fh->target->fileio_close (fh->target_fd,
3356 target_errno);
3357 else
3358 ret = 0;
3359 release_fileio_fd (fd, fh);
3360 }
3361
3362 if (targetdebug)
3363 gdb_printf (gdb_stdlog,
3364 "target_fileio_close (%d) = %d (%d)\n",
3365 fd, ret, ret != -1 ? 0 : *target_errno);
3366 return ret;
3367 }
3368
3369 /* See target.h. */
3370
3371 int
3372 target_fileio_unlink (struct inferior *inf, const char *filename,
3373 fileio_error *target_errno)
3374 {
3375 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3376 {
3377 int ret = t->fileio_unlink (inf, filename, target_errno);
3378
3379 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3380 continue;
3381
3382 if (targetdebug)
3383 gdb_printf (gdb_stdlog,
3384 "target_fileio_unlink (%d,%s)"
3385 " = %d (%d)\n",
3386 inf == NULL ? 0 : inf->num, filename,
3387 ret, ret != -1 ? 0 : *target_errno);
3388 return ret;
3389 }
3390
3391 *target_errno = FILEIO_ENOSYS;
3392 return -1;
3393 }
3394
3395 /* See target.h. */
3396
3397 gdb::optional<std::string>
3398 target_fileio_readlink (struct inferior *inf, const char *filename,
3399 fileio_error *target_errno)
3400 {
3401 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3402 {
3403 gdb::optional<std::string> ret
3404 = t->fileio_readlink (inf, filename, target_errno);
3405
3406 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3407 continue;
3408
3409 if (targetdebug)
3410 gdb_printf (gdb_stdlog,
3411 "target_fileio_readlink (%d,%s)"
3412 " = %s (%d)\n",
3413 inf == NULL ? 0 : inf->num,
3414 filename, ret ? ret->c_str () : "(nil)",
3415 ret ? 0 : *target_errno);
3416 return ret;
3417 }
3418
3419 *target_errno = FILEIO_ENOSYS;
3420 return {};
3421 }
3422
3423 /* Like scoped_fd, but specific to target fileio. */
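/* The wrapped descriptor, if valid, is closed with target_fileio_close
when the object goes out of scope; any error from the close is ignored. */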
3424
3425 class scoped_target_fd
3426 {
3427 public:
3428 explicit scoped_target_fd (int fd) noexcept
3429 : m_fd (fd)
3430 {
3431 }
3432
3433 ~scoped_target_fd ()
3434 {
3435 if (m_fd >= 0)
3436 {
3437 fileio_error target_errno;
3438
3439 target_fileio_close (m_fd, &target_errno);
3440 }
3441 }
3442
3443 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3444
3445 int get () const noexcept
3446 {
3447 return m_fd;
3448 }
3449
3450 private:
3451 int m_fd;
3452 };
3453
3454 /* Read target file FILENAME, in the filesystem as seen by INF. If
3455 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3456 remote targets, the remote stub). Store the result in *BUF_P and
3457 return the size of the transferred data. PADDING additional bytes
3458 are available in *BUF_P. This is a helper function for
3459 target_fileio_read_alloc; see the declaration of that function for
3460 more information. */
3461
3462 static LONGEST
3463 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3464 gdb_byte **buf_p, int padding)
3465 {
3466 size_t buf_alloc, buf_pos;
3467 gdb_byte *buf;
3468 LONGEST n;
3469 fileio_error target_errno;
3470
3471 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3472 0700, false, &target_errno));
3473 if (fd.get () == -1)
3474 return -1;
3475
3476 /* Start by reading up to 4K at a time. The target will throttle
3477 this number down if necessary. */
3478 buf_alloc = 4096;
3479 buf = (gdb_byte *) xmalloc (buf_alloc);
3480 buf_pos = 0;
3481 while (1)
3482 {
3483 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3484 buf_alloc - buf_pos - padding, buf_pos,
3485 &target_errno);
3486 if (n < 0)
3487 {
3488 /* An error occurred. */
3489 xfree (buf);
3490 return -1;
3491 }
3492 else if (n == 0)
3493 {
3494 /* Read all there was. */
3495 if (buf_pos == 0)
3496 xfree (buf);
3497 else
3498 *buf_p = buf;
3499 return buf_pos;
3500 }
3501
3502 buf_pos += n;
3503
3504 /* If the buffer is filling up, expand it. */
3505 if (buf_alloc < buf_pos * 2)
3506 {
3507 buf_alloc *= 2;
3508 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3509 }
3510
3511 QUIT;
3512 }
3513 }
3514
3515 /* See target.h. */
3516
3517 LONGEST
3518 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3519 gdb_byte **buf_p)
3520 {
3521 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3522 }
3523
3524 /* See target.h. */
3525
3526 gdb::unique_xmalloc_ptr<char>
3527 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3528 {
3529 gdb_byte *buffer;
3530 char *bufstr;
3531 LONGEST i, transferred;
3532
3533 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3534 bufstr = (char *) buffer;
3535
3536 if (transferred < 0)
3537 return gdb::unique_xmalloc_ptr<char> (nullptr);
3538
3539 if (transferred == 0)
3540 return make_unique_xstrdup ("");
3541
3542 bufstr[transferred] = 0;
3543
3544 /* Check for embedded NUL bytes; but allow trailing NULs. */
3545 for (i = strlen (bufstr); i < transferred; i++)
3546 if (bufstr[i] != 0)
3547 {
3548 warning (_("target file %s "
3549 "contained unexpected null characters"),
3550 filename);
3551 break;
3552 }
3553
3554 return gdb::unique_xmalloc_ptr<char> (bufstr);
3555 }
3556
3557
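/* By default, accept hardware watchpoint regions no wider than a pointer
on the current architecture. */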
3558 static int
3559 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3560 CORE_ADDR addr, int len)
3561 {
3562 gdbarch *arch = current_inferior ()->arch ();
3563 return (len <= gdbarch_ptr_bit (arch) / TARGET_CHAR_BIT);
3564 }
3565
3566 static int
3567 default_watchpoint_addr_within_range (struct target_ops *target,
3568 CORE_ADDR addr,
3569 CORE_ADDR start, int length)
3570 {
3571 return addr >= start && addr < start + length;
3572 }
3573
3574 /* See target.h. */
3575
3576 target_ops *
3577 target_stack::find_beneath (const target_ops *t) const
3578 {
3579 /* Look for a non-empty slot at stratum levels beneath T's. */
3580 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3581 if (m_stack[stratum].get () != NULL)
3582 return m_stack[stratum].get ();
3583
3584 return NULL;
3585 }
3586
3587 /* See target.h. */
3588
3589 struct target_ops *
3590 find_target_at (enum strata stratum)
3591 {
3592 return current_inferior ()->target_at (stratum);
3593 }
3594
3595 \f
3596
3597 /* See target.h */
3598
3599 void
3600 target_announce_detach (int from_tty)
3601 {
3602 pid_t pid;
3603 const char *exec_file;
3604
3605 if (!from_tty)
3606 return;
3607
3608 pid = inferior_ptid.pid ();
3609 exec_file = get_exec_file (0);
3610 if (exec_file == nullptr)
3611 gdb_printf ("Detaching from pid %s\n",
3612 target_pid_to_str (ptid_t (pid)).c_str ());
3613 else
3614 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3615 target_pid_to_str (ptid_t (pid)).c_str ());
3616 }
3617
3618 /* See target.h */
3619
3620 void
3621 target_announce_attach (int from_tty, int pid)
3622 {
3623 if (!from_tty)
3624 return;
3625
3626 const char *exec_file = get_exec_file (0);
3627
3628 if (exec_file != nullptr)
3629 gdb_printf ("Attaching to program: %s, %s\n", exec_file,
3630 target_pid_to_str (ptid_t (pid)).c_str ());
3631 else
3632 gdb_printf ("Attaching to %s\n",
3633 target_pid_to_str (ptid_t (pid)).c_str ());
3634 }
3635
3636 /* The inferior process has died. Long live the inferior! */
3637
3638 void
3639 generic_mourn_inferior (void)
3640 {
3641 inferior *inf = current_inferior ();
3642
3643 switch_to_no_thread ();
3644
3645 /* Mark breakpoints uninserted in case something tries to delete a
3646 breakpoint while we delete the inferior's threads (which would
3647 fail, since the inferior is long gone). */
3648 mark_breakpoints_out ();
3649
3650 if (inf->pid != 0)
3651 exit_inferior (inf);
3652
3653 /* Note this wipes step-resume breakpoints, so needs to be done
3654 after exit_inferior, which ends up referencing the step-resume
3655 breakpoints through clear_thread_inferior_resources. */
3656 breakpoint_init_inferior (inf_exited);
3657
3658 registers_changed ();
3659
3660 reopen_exec_file ();
3661 reinit_frame_cache ();
3662
3663 if (deprecated_detach_hook)
3664 deprecated_detach_hook ();
3665 }
3666 \f
3667 /* Convert a normal process ID to a string. The result is of the
3668 form "process 1234". */
3669
3670 std::string
3671 normal_pid_to_str (ptid_t ptid)
3672 {
3673 return string_printf ("process %d", ptid.pid ());
3674 }
3675
3676 static std::string
3677 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3678 {
3679 return normal_pid_to_str (ptid);
3680 }
3681
3682 /* Error-catcher for target_find_memory_regions. */
3683 static int
3684 dummy_find_memory_regions (struct target_ops *self,
3685 find_memory_region_ftype ignore1, void *ignore2)
3686 {
3687 error (_("Command not implemented for this target."));
3688 return 0;
3689 }
3690
3691 /* Error-catcher for target_make_corefile_notes. */
3692 static gdb::unique_xmalloc_ptr<char>
3693 dummy_make_corefile_notes (struct target_ops *self,
3694 bfd *ignore1, int *ignore2)
3695 {
3696 error (_("Command not implemented for this target."));
3697 return NULL;
3698 }
3699
3700 #include "target-delegates.c"
3701
3702 /* The initial current target, so that there is always a semi-valid
3703 current target. */
3704
3705 static dummy_target the_dummy_target;
3706
3707 /* See target.h. */
3708
3709 target_ops *
3710 get_dummy_target ()
3711 {
3712 return &the_dummy_target;
3713 }
3714
3715 static const target_info dummy_target_info = {
3716 "None",
3717 N_("None"),
3718 ""
3719 };
3720
3721 strata
3722 dummy_target::stratum () const
3723 {
3724 return dummy_stratum;
3725 }
3726
3727 strata
3728 debug_target::stratum () const
3729 {
3730 return debug_stratum;
3731 }
3732
3733 const target_info &
3734 dummy_target::info () const
3735 {
3736 return dummy_target_info;
3737 }
3738
3739 const target_info &
3740 debug_target::info () const
3741 {
3742 return beneath ()->info ();
3743 }
3744
3745 \f
3746
3747 int
3748 target_thread_alive (ptid_t ptid)
3749 {
3750 return current_inferior ()->top_target ()->thread_alive (ptid);
3751 }
3752
3753 void
3754 target_update_thread_list (void)
3755 {
3756 current_inferior ()->top_target ()->update_thread_list ();
3757 }
3758
3759 void
3760 target_stop (ptid_t ptid)
3761 {
3762 process_stratum_target *proc_target = current_inferior ()->process_target ();
3763
3764 gdb_assert (!proc_target->commit_resumed_state);
3765
3766 if (!may_stop)
3767 {
3768 warning (_("May not interrupt or stop the target, ignoring attempt"));
3769 return;
3770 }
3771
3772 current_inferior ()->top_target ()->stop (ptid);
3773 }
3774
3775 void
3776 target_interrupt ()
3777 {
3778 if (!may_stop)
3779 {
3780 warning (_("May not interrupt or stop the target, ignoring attempt"));
3781 return;
3782 }
3783
3784 current_inferior ()->top_target ()->interrupt ();
3785 }
3786
3787 /* See target.h. */
3788
3789 void
3790 target_pass_ctrlc (void)
3791 {
3792 /* Pass the Ctrl-C to the first target that has a thread
3793 running. */
3794 for (inferior *inf : all_inferiors ())
3795 {
3796 target_ops *proc_target = inf->process_target ();
3797 if (proc_target == NULL)
3798 continue;
3799
3800 for (thread_info *thr : inf->non_exited_threads ())
3801 {
3802 /* A thread can be THREAD_STOPPED and executing, while
3803 running an infcall. */
3804 if (thr->state == THREAD_RUNNING || thr->executing ())
3805 {
3806 /* We can get here quite deep in target layers. Avoid
3807 switching thread context or anything that would
3808 communicate with the target (e.g., to fetch
3809 registers), or flushing, e.g., the frame cache. We
3810 just switch the inferior in order to be able to call
3811 through the target_stack. */
3812 scoped_restore_current_inferior restore_inferior;
3813 set_current_inferior (inf);
3814 current_inferior ()->top_target ()->pass_ctrlc ();
3815 return;
3816 }
3817 }
3818 }
3819 }
3820
3821 /* See target.h. */
3822
3823 void
3824 default_target_pass_ctrlc (struct target_ops *ops)
3825 {
3826 target_interrupt ();
3827 }
3828
3829 /* See target/target.h. */
3830
3831 void
3832 target_stop_and_wait (ptid_t ptid)
3833 {
3834 struct target_waitstatus status;
3835 bool was_non_stop = non_stop;
3836
3837 non_stop = true;
3838 target_stop (ptid);
3839
3840 target_wait (ptid, &status, 0);
3841
3842 non_stop = was_non_stop;
3843 }
3844
3845 /* See target/target.h. */
3846
3847 void
3848 target_continue_no_signal (ptid_t ptid)
3849 {
3850 target_resume (ptid, 0, GDB_SIGNAL_0);
3851 }
3852
3853 /* See target/target.h. */
3854
3855 void
3856 target_continue (ptid_t ptid, enum gdb_signal signal)
3857 {
3858 target_resume (ptid, 0, signal);
3859 }
3860
3861 /* Concatenate ELEM to LIST, a comma-separated list. */
3862
3863 static void
3864 str_comma_list_concat_elem (std::string *list, const char *elem)
3865 {
3866 if (!list->empty ())
3867 list->append (", ");
3868
3869 list->append (elem);
3870 }
3871
3872 /* Helper for target_options_to_string. If OPT is present in
3873 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3874 OPT is removed from TARGET_OPTIONS. */
3875
3876 static void
3877 do_option (target_wait_flags *target_options, std::string *ret,
3878 target_wait_flag opt, const char *opt_str)
3879 {
3880 if ((*target_options & opt) != 0)
3881 {
3882 str_comma_list_concat_elem (ret, opt_str);
3883 *target_options &= ~opt;
3884 }
3885 }
3886
3887 /* See target.h. */
3888
3889 std::string
3890 target_options_to_string (target_wait_flags target_options)
3891 {
3892 std::string ret;
3893
3894 #define DO_TARG_OPTION(OPT) \
3895 do_option (&target_options, &ret, OPT, #OPT)
3896
3897 DO_TARG_OPTION (TARGET_WNOHANG);
3898
3899 if (target_options != 0)
3900 str_comma_list_concat_elem (&ret, "unknown???");
3901
3902 return ret;
3903 }
3904
3905 void
3906 target_fetch_registers (struct regcache *regcache, int regno)
3907 {
3908 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3909 if (targetdebug)
3910 regcache->debug_print_register ("target_fetch_registers", regno);
3911 }
3912
3913 void
3914 target_store_registers (struct regcache *regcache, int regno)
3915 {
3916 if (!may_write_registers)
3917 error (_("Writing to registers is not allowed (regno %d)"), regno);
3918
3919 current_inferior ()->top_target ()->store_registers (regcache, regno);
3920 if (targetdebug)
3921 {
3922 regcache->debug_print_register ("target_store_registers", regno);
3923 }
3924 }
3925
3926 int
3927 target_core_of_thread (ptid_t ptid)
3928 {
3929 return current_inferior ()->top_target ()->core_of_thread (ptid);
3930 }
3931
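/* Compare SIZE bytes of target memory starting at LMA against DATA,
reading in chunks of at most 1024 bytes. Returns 1 if the contents
match, and 0 on a mismatch or read error. */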
3932 int
3933 simple_verify_memory (struct target_ops *ops,
3934 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3935 {
3936 LONGEST total_xfered = 0;
3937
3938 while (total_xfered < size)
3939 {
3940 ULONGEST xfered_len;
3941 enum target_xfer_status status;
3942 gdb_byte buf[1024];
3943 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3944
3945 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3946 buf, NULL, lma + total_xfered, howmuch,
3947 &xfered_len);
3948 if (status == TARGET_XFER_OK
3949 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3950 {
3951 total_xfered += xfered_len;
3952 QUIT;
3953 }
3954 else
3955 return 0;
3956 }
3957 return 1;
3958 }
3959
3960 /* Default implementation of memory verification. */
3961
3962 static int
3963 default_verify_memory (struct target_ops *self,
3964 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3965 {
3966 /* Start over from the top of the target stack. */
3967 return simple_verify_memory (current_inferior ()->top_target (),
3968 data, memaddr, size);
3969 }
3970
3971 int
3972 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3973 {
3974 target_ops *target = current_inferior ()->top_target ();
3975
3976 return target->verify_memory (data, memaddr, size);
3977 }
3978
3979 /* The documentation for this function is in its prototype declaration in
3980 target.h. */
3981
3982 int
3983 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3984 enum target_hw_bp_type rw)
3985 {
3986 target_ops *target = current_inferior ()->top_target ();
3987
3988 return target->insert_mask_watchpoint (addr, mask, rw);
3989 }
3990
3991 /* The documentation for this function is in its prototype declaration in
3992 target.h. */
3993
3994 int
3995 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3996 enum target_hw_bp_type rw)
3997 {
3998 target_ops *target = current_inferior ()->top_target ();
3999
4000 return target->remove_mask_watchpoint (addr, mask, rw);
4001 }
4002
4003 /* The documentation for this function is in its prototype declaration
4004 in target.h. */
4005
4006 int
4007 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4008 {
4009 target_ops *target = current_inferior ()->top_target ();
4010
4011 return target->masked_watch_num_registers (addr, mask);
4012 }
4013
4014 /* The documentation for this function is in its prototype declaration
4015 in target.h. */
4016
4017 int
4018 target_ranged_break_num_registers (void)
4019 {
4020 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4021 }
4022
4023 /* See target.h. */
4024
4025 struct btrace_target_info *
4026 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4027 {
4028 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4029 }
4030
4031 /* See target.h. */
4032
4033 void
4034 target_disable_btrace (struct btrace_target_info *btinfo)
4035 {
4036 current_inferior ()->top_target ()->disable_btrace (btinfo);
4037 }
4038
4039 /* See target.h. */
4040
4041 void
4042 target_teardown_btrace (struct btrace_target_info *btinfo)
4043 {
4044 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4045 }
4046
4047 /* See target.h. */
4048
4049 enum btrace_error
4050 target_read_btrace (struct btrace_data *btrace,
4051 struct btrace_target_info *btinfo,
4052 enum btrace_read_type type)
4053 {
4054 target_ops *target = current_inferior ()->top_target ();
4055
4056 return target->read_btrace (btrace, btinfo, type);
4057 }
4058
4059 /* See target.h. */
4060
4061 const struct btrace_config *
4062 target_btrace_conf (const struct btrace_target_info *btinfo)
4063 {
4064 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4065 }
4066
4067 /* See target.h. */
4068
4069 void
4070 target_stop_recording (void)
4071 {
4072 current_inferior ()->top_target ()->stop_recording ();
4073 }
4074
4075 /* See target.h. */
4076
4077 void
4078 target_save_record (const char *filename)
4079 {
4080 current_inferior ()->top_target ()->save_record (filename);
4081 }
4082
4083 /* See target.h. */
4084
4085 int
4086 target_supports_delete_record ()
4087 {
4088 return current_inferior ()->top_target ()->supports_delete_record ();
4089 }
4090
4091 /* See target.h. */
4092
4093 void
4094 target_delete_record (void)
4095 {
4096 current_inferior ()->top_target ()->delete_record ();
4097 }
4098
4099 /* See target.h. */
4100
4101 enum record_method
4102 target_record_method (ptid_t ptid)
4103 {
4104 return current_inferior ()->top_target ()->record_method (ptid);
4105 }
4106
4107 /* See target.h. */
4108
4109 int
4110 target_record_is_replaying (ptid_t ptid)
4111 {
4112 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4113 }
4114
4115 /* See target.h. */
4116
4117 int
4118 target_record_will_replay (ptid_t ptid, int dir)
4119 {
4120 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4121 }
4122
4123 /* See target.h. */
4124
4125 void
4126 target_record_stop_replaying (void)
4127 {
4128 current_inferior ()->top_target ()->record_stop_replaying ();
4129 }
4130
4131 /* See target.h. */
4132
4133 void
4134 target_goto_record_begin (void)
4135 {
4136 current_inferior ()->top_target ()->goto_record_begin ();
4137 }
4138
4139 /* See target.h. */
4140
4141 void
4142 target_goto_record_end (void)
4143 {
4144 current_inferior ()->top_target ()->goto_record_end ();
4145 }
4146
4147 /* See target.h. */
4148
4149 void
4150 target_goto_record (ULONGEST insn)
4151 {
4152 current_inferior ()->top_target ()->goto_record (insn);
4153 }
4154
4155 /* See target.h. */
4156
4157 void
4158 target_insn_history (int size, gdb_disassembly_flags flags)
4159 {
4160 current_inferior ()->top_target ()->insn_history (size, flags);
4161 }
4162
4163 /* See target.h. */
4164
4165 void
4166 target_insn_history_from (ULONGEST from, int size,
4167 gdb_disassembly_flags flags)
4168 {
4169 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4170 }
4171
4172 /* See target.h. */
4173
4174 void
4175 target_insn_history_range (ULONGEST begin, ULONGEST end,
4176 gdb_disassembly_flags flags)
4177 {
4178 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4179 }
4180
4181 /* See target.h. */
4182
4183 void
4184 target_call_history (int size, record_print_flags flags)
4185 {
4186 current_inferior ()->top_target ()->call_history (size, flags);
4187 }
4188
4189 /* See target.h. */
4190
4191 void
4192 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4193 {
4194 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4195 }
4196
4197 /* See target.h. */
4198
4199 void
4200 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4201 {
4202 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4203 }
4204
4205 /* See target.h. */
4206
4207 const struct frame_unwind *
4208 target_get_unwinder (void)
4209 {
4210 return current_inferior ()->top_target ()->get_unwinder ();
4211 }
4212
4213 /* See target.h. */
4214
4215 const struct frame_unwind *
4216 target_get_tailcall_unwinder (void)
4217 {
4218 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4219 }
4220
4221 /* See target.h. */
4222
4223 void
4224 target_prepare_to_generate_core (void)
4225 {
4226 current_inferior ()->top_target ()->prepare_to_generate_core ();
4227 }
4228
4229 /* See target.h. */
4230
4231 void
4232 target_done_generating_core (void)
4233 {
4234 current_inferior ()->top_target ()->done_generating_core ();
4235 }
4236
4237 \f
4238
4239 static char targ_desc[] =
4240 "Names of targets and files being debugged.\nShows the entire \
4241 stack of targets currently in use (including the exec-file,\n\
4242 core-file, and process, if any), as well as the symbol file name.";
4243
4244 static void
4245 default_rcmd (struct target_ops *self, const char *command,
4246 struct ui_file *output)
4247 {
4248 error (_("\"monitor\" command not supported by this target."));
4249 }
4250
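/* Implement the "monitor" command registered below; the argument string
   is passed unmodified to the target via target_rcmd.  For example
   (illustrative, for a remote stub that understands it):
     (gdb) monitor reset  */
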
4251 static void
4252 do_monitor_command (const char *cmd, int from_tty)
4253 {
4254 target_rcmd (cmd, gdb_stdtarg);
4255 }
4256
4257 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4258 ignored. */
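/* For example (illustrative), the command takes no arguments:
     (gdb) flash-erase  */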
4259
4260 void
4261 flash_erase_command (const char *cmd, int from_tty)
4262 {
4263 /* Set when we find a flash region, so that we later tell the target the flash operations are done. */
4264 bool found_flash_region = false;
4265 gdbarch *gdbarch = current_inferior ()->arch ();
4266
4267 std::vector<mem_region> mem_regions = target_memory_map ();
4268
4269 /* Iterate over all memory regions. */
4270 for (const mem_region &m : mem_regions)
4271 {
4272 /* Is this a flash memory region? */
4273 if (m.attrib.mode == MEM_FLASH)
4274 {
4275 found_flash_region = true;
4276 target_flash_erase (m.lo, m.hi - m.lo);
4277
4278 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4279
4280 current_uiout->message (_("Erasing flash memory region at address "));
4281 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4282 current_uiout->message (", size = ");
4283 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4284 current_uiout->message ("\n");
4285 }
4286 }
4287
4288 /* Did we do any flash operations? If so, we need to finalize them. */
4289 if (found_flash_region)
4290 target_flash_done ();
4291 else
4292 current_uiout->message (_("No flash memory regions found.\n"));
4293 }
4294
4295 /* Print the name of each layer of our target stack. */
4296
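/* This is reached as "maint print target-stack" (see the registration on
   maintenanceprintlist below), e.g. (illustrative):
     (gdb) maint print target-stack  */
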
4297 static void
4298 maintenance_print_target_stack (const char *cmd, int from_tty)
4299 {
4300 gdb_printf (_("The current target stack is:\n"));
4301
4302 for (target_ops *t = current_inferior ()->top_target ();
4303 t != NULL;
4304 t = t->beneath ())
4305 {
4306 if (t->stratum () == debug_stratum)
4307 continue;
4308 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4309 }
4310 }
4311
4312 /* See target.h. */
4313
4314 void
4315 target_async (bool enable)
4316 {
4317 /* If we are trying to enable async mode then it must be the case that
4318 async mode is possible for this target. */
4319 gdb_assert (!enable || target_can_async_p ());
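/* Keep infrun's async event handling in step with the target's.  */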
4320 infrun_async (enable);
4321 current_inferior ()->top_target ()->async (enable);
4322 }
4323
4324 /* See target.h. */
4325
4326 void
4327 target_thread_events (int enable)
4328 {
4329 current_inferior ()->top_target ()->thread_events (enable);
4330 }
4331
4332 /* Controls whether targets may report that they can be, or are, async.
4333 This is just for maintainers to use when debugging gdb. */
4334 bool target_async_permitted = true;
4335
4336 static void
4337 set_maint_target_async (bool permitted)
4338 {
4339 if (have_live_inferiors ())
4340 error (_("Cannot change this setting while the inferior is running."));
4341
4342 target_async_permitted = permitted;
4343 }
4344
4345 static bool
4346 get_maint_target_async ()
4347 {
4348 return target_async_permitted;
4349 }
4350
4351 static void
4352 show_maint_target_async (ui_file *file, int from_tty,
4353 cmd_list_element *c, const char *value)
4354 {
4355 gdb_printf (file,
4356 _("Controlling the inferior in "
4357 "asynchronous mode is %s.\n"), value);
4358 }
4359
4360 /* Return true if the target operates in non-stop mode even with "set
4361 non-stop off". */
4362
4363 static int
4364 target_always_non_stop_p (void)
4365 {
4366 return current_inferior ()->top_target ()->always_non_stop_p ();
4367 }
4368
4369 /* See target.h. */
4370
4371 bool
4372 target_is_non_stop_p ()
4373 {
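/* Non-stop applies if the user enabled it ("set non-stop on"), or if
   "maint set target-non-stop" is "on", or if it is "auto" and the
   target always runs in non-stop mode; async support is required in
   every case.  */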
4374 return ((non_stop
4375 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4376 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4377 && target_always_non_stop_p ()))
4378 && target_can_async_p ());
4379 }
4380
4381 /* See target.h. */
4382
4383 bool
4384 exists_non_stop_target ()
4385 {
4386 if (target_is_non_stop_p ())
4387 return true;
4388
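/* Switching inferiors below changes the selected context; restore the
   user's selection when we are done.  */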
4389 scoped_restore_current_thread restore_thread;
4390
4391 for (inferior *inf : all_inferiors ())
4392 {
4393 switch_to_inferior_no_thread (inf);
4394 if (target_is_non_stop_p ())
4395 return true;
4396 }
4397
4398 return false;
4399 }
4400
4401 /* Controls whether targets may report that they always run in non-stop
4402 mode. This is just for maintainers to use when debugging gdb. */
4403 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4404
4405 /* Set callback for maint target-non-stop setting. */
4406
4407 static void
4408 set_maint_target_non_stop (auto_boolean enabled)
4409 {
4410 if (have_live_inferiors ())
4411 error (_("Cannot change this setting while the inferior is running."));
4412
4413 target_non_stop_enabled = enabled;
4414 }
4415
4416 /* Get callback for maint target-non-stop setting. */
4417
4418 static auto_boolean
4419 get_maint_target_non_stop ()
4420 {
4421 return target_non_stop_enabled;
4422 }
4423
4424 static void
4425 show_maint_target_non_stop (ui_file *file, int from_tty,
4426 cmd_list_element *c, const char *value)
4427 {
4428 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4429 gdb_printf (file,
4430 _("Whether the target is always in non-stop mode "
4431 "is %s (currently %s).\n"), value,
4432 target_always_non_stop_p () ? "on" : "off");
4433 else
4434 gdb_printf (file,
4435 _("Whether the target is always in non-stop mode "
4436 "is %s.\n"), value);
4437 }
4438
4439 /* Temporary copies of permission settings. */
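/* The "set may-*" commands below operate on these copies; their set
   hooks then propagate the values into the real flags above (see
   set_target_permissions and set_write_memory_registers_permission).  */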
4440
4441 static bool may_write_registers_1 = true;
4442 static bool may_write_memory_1 = true;
4443 static bool may_insert_breakpoints_1 = true;
4444 static bool may_insert_tracepoints_1 = true;
4445 static bool may_insert_fast_tracepoints_1 = true;
4446 static bool may_stop_1 = true;
4447
4448 /* Make the user-set values match the real values again. */
4449
4450 void
4451 update_target_permissions (void)
4452 {
4453 may_write_registers_1 = may_write_registers;
4454 may_write_memory_1 = may_write_memory;
4455 may_insert_breakpoints_1 = may_insert_breakpoints;
4456 may_insert_tracepoints_1 = may_insert_tracepoints;
4457 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4458 may_stop_1 = may_stop;
4459 }
4460
4461 /* This single function handles (most of) the permission flags in the
4462 same way. */
4463
4464 static void
4465 set_target_permissions (const char *args, int from_tty,
4466 struct cmd_list_element *c)
4467 {
4468 if (target_has_execution ())
4469 {
4470 update_target_permissions ();
4471 error (_("Cannot change this setting while the inferior is running."));
4472 }
4473
4474 /* Make the real values match the user-changed values. */
4475 may_insert_breakpoints = may_insert_breakpoints_1;
4476 may_insert_tracepoints = may_insert_tracepoints_1;
4477 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4478 may_stop = may_stop_1;
4479 update_observer_mode ();
4480 }
4481
4482 /* Set some permissions independently of observer mode. */
4483
4484 static void
4485 set_write_memory_registers_permission (const char *args, int from_tty,
4486 struct cmd_list_element *c)
4487 {
4488 /* Make the real values match the user-changed values. */
4489 may_write_memory = may_write_memory_1;
4490 may_write_registers = may_write_registers_1;
4491 update_observer_mode ();
4492 }
4493
4494 void _initialize_target ();
4495
4496 void
4497 _initialize_target ()
4498 {
4499 the_debug_target = new debug_target ();
4500
4501 add_info ("target", info_target_command, targ_desc);
4502 add_info ("files", info_target_command, targ_desc);
4503
4504 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4505 Set target debugging."), _("\
4506 Show target debugging."), _("\
4507 When non-zero, target debugging is enabled. Higher numbers are more\n\
4508 verbose."),
4509 set_targetdebug,
4510 show_targetdebug,
4511 &setdebuglist, &showdebuglist);
4512
4513 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4514 &trust_readonly, _("\
4515 Set mode for reading from readonly sections."), _("\
4516 Show mode for reading from readonly sections."), _("\
4517 When this mode is on, memory reads from readonly sections (such as .text)\n\
4518 will be read from the object file instead of from the target. This will\n\
4519 result in significant performance improvement for remote targets."),
4520 NULL,
4521 show_trust_readonly,
4522 &setlist, &showlist);
4523
4524 add_com ("monitor", class_obscure, do_monitor_command,
4525 _("Send a command to the remote monitor (remote targets only)."));
4526
4527 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4528 _("Print the name of each layer of the internal target stack."),
4529 &maintenanceprintlist);
4530
4531 add_setshow_boolean_cmd ("target-async", no_class,
4532 _("\
4533 Set whether gdb controls the inferior in asynchronous mode."), _("\
4534 Show whether gdb controls the inferior in asynchronous mode."), _("\
4535 Tells gdb whether to control the inferior in asynchronous mode."),
4536 set_maint_target_async,
4537 get_maint_target_async,
4538 show_maint_target_async,
4539 &maintenance_set_cmdlist,
4540 &maintenance_show_cmdlist);
4541
4542 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4543 _("\
4544 Set whether gdb always controls the inferior in non-stop mode."), _("\
4545 Show whether gdb always controls the inferior in non-stop mode."), _("\
4546 Tells gdb whether to control the inferior in non-stop mode."),
4547 set_maint_target_non_stop,
4548 get_maint_target_non_stop,
4549 show_maint_target_non_stop,
4550 &maintenance_set_cmdlist,
4551 &maintenance_show_cmdlist);
4552
4553 add_setshow_boolean_cmd ("may-write-registers", class_support,
4554 &may_write_registers_1, _("\
4555 Set permission to write into registers."), _("\
4556 Show permission to write into registers."), _("\
4557 When this permission is on, GDB may write into the target's registers.\n\
4558 Otherwise, any sort of write attempt will result in an error."),
4559 set_write_memory_registers_permission, NULL,
4560 &setlist, &showlist);
4561
4562 add_setshow_boolean_cmd ("may-write-memory", class_support,
4563 &may_write_memory_1, _("\
4564 Set permission to write into target memory."), _("\
4565 Show permission to write into target memory."), _("\
4566 When this permission is on, GDB may write into the target's memory.\n\
4567 Otherwise, any sort of write attempt will result in an error."),
4568 set_write_memory_registers_permission, NULL,
4569 &setlist, &showlist);
4570
4571 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4572 &may_insert_breakpoints_1, _("\
4573 Set permission to insert breakpoints in the target."), _("\
4574 Show permission to insert breakpoints in the target."), _("\
4575 When this permission is on, GDB may insert breakpoints in the program.\n\
4576 Otherwise, any sort of insertion attempt will result in an error."),
4577 set_target_permissions, NULL,
4578 &setlist, &showlist);
4579
4580 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4581 &may_insert_tracepoints_1, _("\
4582 Set permission to insert tracepoints in the target."), _("\
4583 Show permission to insert tracepoints in the target."), _("\
4584 When this permission is on, GDB may insert tracepoints in the program.\n\
4585 Otherwise, any sort of insertion attempt will result in an error."),
4586 set_target_permissions, NULL,
4587 &setlist, &showlist);
4588
4589 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4590 &may_insert_fast_tracepoints_1, _("\
4591 Set permission to insert fast tracepoints in the target."), _("\
4592 Show permission to insert fast tracepoints in the target."), _("\
4593 When this permission is on, GDB may insert fast tracepoints.\n\
4594 Otherwise, any sort of insertion attempt will result in an error."),
4595 set_target_permissions, NULL,
4596 &setlist, &showlist);
4597
4598 add_setshow_boolean_cmd ("may-interrupt", class_support,
4599 &may_stop_1, _("\
4600 Set permission to interrupt or signal the target."), _("\
4601 Show permission to interrupt or signal the target."), _("\
4602 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4603 Otherwise, any attempt to interrupt or stop will be ignored."),
4604 set_target_permissions, NULL,
4605 &setlist, &showlist);
4606
4607 add_com ("flash-erase", no_class, flash_erase_command,
4608 _("Erase all flash memory regions."));
4609
4610 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4611 &auto_connect_native_target, _("\
4612 Set whether GDB may automatically connect to the native target."), _("\
4613 Show whether GDB may automatically connect to the native target."), _("\
4614 When on, and GDB is not connected to a target yet, GDB\n\
4615 attempts \"run\" and other commands with the native target."),
4616 NULL, show_auto_connect_native_target,
4617 &setlist, &showlist);
4618 }