1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2023 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "observable.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdbcore.h"
37 #include "target-descriptions.h"
38 #include "gdbthread.h"
39 #include "solib.h"
40 #include "exec.h"
41 #include "inline-frame.h"
42 #include "tracepoint.h"
43 #include "gdbsupport/fileio.h"
44 #include "gdbsupport/agent.h"
45 #include "auxv.h"
46 #include "target-debug.h"
47 #include "ui.h"
48 #include "event-top.h"
49 #include <algorithm>
50 #include "gdbsupport/byte-vector.h"
51 #include "gdbsupport/search.h"
52 #include "terminal.h"
53 #include <unordered_map>
54 #include "target-connection.h"
55 #include "valprint.h"
56 #include "cli/cli-decode.h"
57
58 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
59
60 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
61
62 static int default_verify_memory (struct target_ops *self,
63 const gdb_byte *data,
64 CORE_ADDR memaddr, ULONGEST size);
65
66 static void tcomplain (void) ATTRIBUTE_NORETURN;
67
68 /* Mapping between target_info objects (which have address identity)
69 and corresponding open/factory function/callback. Each add_target
70 call adds one entry to this map, and registers a "target
71 TARGET_NAME" command that when invoked calls the factory registered
72 here. The target_info object is associated with the command via
73 the command's context. */
74 static std::unordered_map<const target_info *, target_open_ftype *>
75 target_factories;
76
77 /* The singleton debug target. */
78
79 static struct target_ops *the_debug_target;
80
81 /* Command list for target. */
82
83 static struct cmd_list_element *targetlist = NULL;
84
85 /* See target.h. */
86
87 bool trust_readonly = false;
88
89 /* Nonzero if we should show true memory content including
90 memory breakpoints inserted by GDB. */
91
92 static int show_memory_breakpoints = 0;
93
94 /* These globals control whether GDB attempts to perform these
95 operations; they are useful for targets that need to prevent
96 inadvertent disruption, such as in non-stop mode. */
97
98 bool may_write_registers = true;
99
100 bool may_write_memory = true;
101
102 bool may_insert_breakpoints = true;
103
104 bool may_insert_tracepoints = true;
105
106 bool may_insert_fast_tracepoints = true;
107
108 bool may_stop = true;
109
110 /* Nonzero if we want to see tracing of target-level operations. */
111
112 static unsigned int targetdebug = 0;
113
114 static void
115 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
116 {
117 if (targetdebug)
118 current_inferior ()->push_target (the_debug_target);
119 else
120 current_inferior ()->unpush_target (the_debug_target);
121 }
122
123 static void
124 show_targetdebug (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 gdb_printf (file, _("Target debugging is %s.\n"), value);
128 }
129
130 int
131 target_has_memory ()
132 {
133 for (target_ops *t = current_inferior ()->top_target ();
134 t != NULL;
135 t = t->beneath ())
136 if (t->has_memory ())
137 return 1;
138
139 return 0;
140 }
141
142 int
143 target_has_stack ()
144 {
145 for (target_ops *t = current_inferior ()->top_target ();
146 t != NULL;
147 t = t->beneath ())
148 if (t->has_stack ())
149 return 1;
150
151 return 0;
152 }
153
154 int
155 target_has_registers ()
156 {
157 for (target_ops *t = current_inferior ()->top_target ();
158 t != NULL;
159 t = t->beneath ())
160 if (t->has_registers ())
161 return 1;
162
163 return 0;
164 }
165
166 bool
167 target_has_execution (inferior *inf)
168 {
169 if (inf == nullptr)
170 inf = current_inferior ();
171
172 for (target_ops *t = inf->top_target ();
173 t != nullptr;
174 t = inf->find_target_beneath (t))
175 if (t->has_execution (inf))
176 return true;
177
178 return false;
179 }
180
181 const char *
182 target_shortname ()
183 {
184 return current_inferior ()->top_target ()->shortname ();
185 }
186
187 /* See target.h. */
188
189 bool
190 target_attach_no_wait ()
191 {
192 return current_inferior ()->top_target ()->attach_no_wait ();
193 }
194
195 /* See target.h. */
196
197 void
198 target_post_attach (int pid)
199 {
200 return current_inferior ()->top_target ()->post_attach (pid);
201 }
202
203 /* See target.h. */
204
205 void
206 target_prepare_to_store (regcache *regcache)
207 {
208 return current_inferior ()->top_target ()->prepare_to_store (regcache);
209 }
210
211 /* See target.h. */
212
213 bool
214 target_supports_enable_disable_tracepoint ()
215 {
216 target_ops *target = current_inferior ()->top_target ();
217
218 return target->supports_enable_disable_tracepoint ();
219 }
220
221 bool
222 target_supports_string_tracing ()
223 {
224 return current_inferior ()->top_target ()->supports_string_tracing ();
225 }
226
227 /* See target.h. */
228
229 bool
230 target_supports_evaluation_of_breakpoint_conditions ()
231 {
232 target_ops *target = current_inferior ()->top_target ();
233
234 return target->supports_evaluation_of_breakpoint_conditions ();
235 }
236
237 /* See target.h. */
238
239 bool
240 target_supports_dumpcore ()
241 {
242 return current_inferior ()->top_target ()->supports_dumpcore ();
243 }
244
245 /* See target.h. */
246
247 void
248 target_dumpcore (const char *filename)
249 {
250 return current_inferior ()->top_target ()->dumpcore (filename);
251 }
252
253 /* See target.h. */
254
255 bool
256 target_can_run_breakpoint_commands ()
257 {
258 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
259 }
260
261 /* See target.h. */
262
263 void
264 target_files_info ()
265 {
266 return current_inferior ()->top_target ()->files_info ();
267 }
268
269 /* See target.h. */
270
271 int
272 target_insert_fork_catchpoint (int pid)
273 {
274 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
275 }
276
277 /* See target.h. */
278
279 int
280 target_remove_fork_catchpoint (int pid)
281 {
282 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
283 }
284
285 /* See target.h. */
286
287 int
288 target_insert_vfork_catchpoint (int pid)
289 {
290 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
291 }
292
293 /* See target.h. */
294
295 int
296 target_remove_vfork_catchpoint (int pid)
297 {
298 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
299 }
300
301 /* See target.h. */
302
303 int
304 target_insert_exec_catchpoint (int pid)
305 {
306 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
307 }
308
309 /* See target.h. */
310
311 int
312 target_remove_exec_catchpoint (int pid)
313 {
314 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
315 }
316
317 /* See target.h. */
318
319 int
320 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
321 gdb::array_view<const int> syscall_counts)
322 {
323 target_ops *target = current_inferior ()->top_target ();
324
325 return target->set_syscall_catchpoint (pid, needed, any_count,
326 syscall_counts);
327 }
328
329 /* See target.h. */
330
331 void
332 target_rcmd (const char *command, struct ui_file *outbuf)
333 {
334 return current_inferior ()->top_target ()->rcmd (command, outbuf);
335 }
336
337 /* See target.h. */
338
339 bool
340 target_can_lock_scheduler ()
341 {
342 target_ops *target = current_inferior ()->top_target ();
343
344 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
345 }
346
347 /* See target.h. */
348
349 bool
350 target_can_async_p ()
351 {
352 return target_can_async_p (current_inferior ()->top_target ());
353 }
354
355 /* See target.h. */
356
357 bool
358 target_can_async_p (struct target_ops *target)
359 {
360 if (!target_async_permitted)
361 return false;
362 return target->can_async_p ();
363 }
364
365 /* See target.h. */
366
367 bool
368 target_is_async_p ()
369 {
370 bool result = current_inferior ()->top_target ()->is_async_p ();
371 gdb_assert (target_async_permitted || !result);
372 return result;
373 }
374
375 exec_direction_kind
376 target_execution_direction ()
377 {
378 return current_inferior ()->top_target ()->execution_direction ();
379 }
380
381 /* See target.h. */
382
383 const char *
384 target_extra_thread_info (thread_info *tp)
385 {
386 return current_inferior ()->top_target ()->extra_thread_info (tp);
387 }
388
389 /* See target.h. */
390
391 const char *
392 target_pid_to_exec_file (int pid)
393 {
394 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
395 }
396
397 /* See target.h. */
398
399 gdbarch *
400 target_thread_architecture (ptid_t ptid)
401 {
402 return current_inferior ()->top_target ()->thread_architecture (ptid);
403 }
404
405 /* See target.h. */
406
407 int
408 target_find_memory_regions (find_memory_region_ftype func, void *data)
409 {
410 return current_inferior ()->top_target ()->find_memory_regions (func, data);
411 }
412
413 /* See target.h. */
414
415 gdb::unique_xmalloc_ptr<char>
416 target_make_corefile_notes (bfd *bfd, int *size_p)
417 {
418 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
419 }
420
421 gdb_byte *
422 target_get_bookmark (const char *args, int from_tty)
423 {
424 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
425 }
426
427 void
428 target_goto_bookmark (const gdb_byte *arg, int from_tty)
429 {
430 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
431 }
432
433 /* See target.h. */
434
435 bool
436 target_stopped_by_watchpoint ()
437 {
438 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
439 }
440
441 /* See target.h. */
442
443 bool
444 target_stopped_by_sw_breakpoint ()
445 {
446 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
447 }
448
449 bool
450 target_supports_stopped_by_sw_breakpoint ()
451 {
452 target_ops *target = current_inferior ()->top_target ();
453
454 return target->supports_stopped_by_sw_breakpoint ();
455 }
456
457 bool
458 target_stopped_by_hw_breakpoint ()
459 {
460 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
461 }
462
463 bool
464 target_supports_stopped_by_hw_breakpoint ()
465 {
466 target_ops *target = current_inferior ()->top_target ();
467
468 return target->supports_stopped_by_hw_breakpoint ();
469 }
470
471 /* See target.h. */
472
473 bool
474 target_have_steppable_watchpoint ()
475 {
476 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
477 }
478
479 /* See target.h. */
480
481 int
482 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
483 {
484 target_ops *target = current_inferior ()->top_target ();
485
486 return target->can_use_hw_breakpoint (type, cnt, othertype);
487 }
488
489 /* See target.h. */
490
491 int
492 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
493 {
494 target_ops *target = current_inferior ()->top_target ();
495
496 return target->region_ok_for_hw_watchpoint (addr, len);
497 }
498
499
500 int
501 target_can_do_single_step ()
502 {
503 return current_inferior ()->top_target ()->can_do_single_step ();
504 }
505
506 /* See target.h. */
507
508 int
509 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
510 expression *cond)
511 {
512 target_ops *target = current_inferior ()->top_target ();
513
514 return target->insert_watchpoint (addr, len, type, cond);
515 }
516
517 /* See target.h. */
518
519 int
520 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
521 expression *cond)
522 {
523 target_ops *target = current_inferior ()->top_target ();
524
525 return target->remove_watchpoint (addr, len, type, cond);
526 }
527
528 /* See target.h. */
529
530 int
531 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
532 {
533 target_ops *target = current_inferior ()->top_target ();
534
535 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
536 }
537
538 /* See target.h. */
539
540 int
541 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
542 {
543 target_ops *target = current_inferior ()->top_target ();
544
545 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
546 }
547
548 /* See target.h. */
549
550 bool
551 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
552 expression *cond)
553 {
554 target_ops *target = current_inferior ()->top_target ();
555
556 return target->can_accel_watchpoint_condition (addr, len, type, cond);
557 }
558
559 /* See target.h. */
560
561 bool
562 target_can_execute_reverse ()
563 {
564 return current_inferior ()->top_target ()->can_execute_reverse ();
565 }
566
567 ptid_t
568 target_get_ada_task_ptid (long lwp, ULONGEST tid)
569 {
570 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
571 }
572
573 bool
574 target_filesystem_is_local ()
575 {
576 return current_inferior ()->top_target ()->filesystem_is_local ();
577 }
578
579 void
580 target_trace_init ()
581 {
582 return current_inferior ()->top_target ()->trace_init ();
583 }
584
585 void
586 target_download_tracepoint (bp_location *location)
587 {
588 return current_inferior ()->top_target ()->download_tracepoint (location);
589 }
590
591 bool
592 target_can_download_tracepoint ()
593 {
594 return current_inferior ()->top_target ()->can_download_tracepoint ();
595 }
596
597 void
598 target_download_trace_state_variable (const trace_state_variable &tsv)
599 {
600 target_ops *target = current_inferior ()->top_target ();
601
602 return target->download_trace_state_variable (tsv);
603 }
604
605 void
606 target_enable_tracepoint (bp_location *loc)
607 {
608 return current_inferior ()->top_target ()->enable_tracepoint (loc);
609 }
610
611 void
612 target_disable_tracepoint (bp_location *loc)
613 {
614 return current_inferior ()->top_target ()->disable_tracepoint (loc);
615 }
616
617 void
618 target_trace_start ()
619 {
620 return current_inferior ()->top_target ()->trace_start ();
621 }
622
623 void
624 target_trace_set_readonly_regions ()
625 {
626 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
627 }
628
629 int
630 target_get_trace_status (trace_status *ts)
631 {
632 return current_inferior ()->top_target ()->get_trace_status (ts);
633 }
634
635 void
636 target_get_tracepoint_status (tracepoint *tp, uploaded_tp *utp)
637 {
638 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
639 }
640
641 void
642 target_trace_stop ()
643 {
644 return current_inferior ()->top_target ()->trace_stop ();
645 }
646
647 int
648 target_trace_find (trace_find_type type, int num,
649 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
650 {
651 target_ops *target = current_inferior ()->top_target ();
652
653 return target->trace_find (type, num, addr1, addr2, tpp);
654 }
655
656 bool
657 target_get_trace_state_variable_value (int tsv, LONGEST *val)
658 {
659 target_ops *target = current_inferior ()->top_target ();
660
661 return target->get_trace_state_variable_value (tsv, val);
662 }
663
664 int
665 target_save_trace_data (const char *filename)
666 {
667 return current_inferior ()->top_target ()->save_trace_data (filename);
668 }
669
670 int
671 target_upload_tracepoints (uploaded_tp **utpp)
672 {
673 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
674 }
675
676 int
677 target_upload_trace_state_variables (uploaded_tsv **utsvp)
678 {
679 target_ops *target = current_inferior ()->top_target ();
680
681 return target->upload_trace_state_variables (utsvp);
682 }
683
684 LONGEST
685 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
686 {
687 target_ops *target = current_inferior ()->top_target ();
688
689 return target->get_raw_trace_data (buf, offset, len);
690 }
691
692 int
693 target_get_min_fast_tracepoint_insn_len ()
694 {
695 target_ops *target = current_inferior ()->top_target ();
696
697 return target->get_min_fast_tracepoint_insn_len ();
698 }
699
700 void
701 target_set_disconnected_tracing (int val)
702 {
703 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
704 }
705
706 void
707 target_set_circular_trace_buffer (int val)
708 {
709 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
710 }
711
712 void
713 target_set_trace_buffer_size (LONGEST val)
714 {
715 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
716 }
717
718 bool
719 target_set_trace_notes (const char *user, const char *notes,
720 const char *stopnotes)
721 {
722 target_ops *target = current_inferior ()->top_target ();
723
724 return target->set_trace_notes (user, notes, stopnotes);
725 }
726
727 bool
728 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
729 {
730 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
731 }
732
733 void
734 target_set_permissions ()
735 {
736 return current_inferior ()->top_target ()->set_permissions ();
737 }
738
739 bool
740 target_static_tracepoint_marker_at (CORE_ADDR addr,
741 static_tracepoint_marker *marker)
742 {
743 target_ops *target = current_inferior ()->top_target ();
744
745 return target->static_tracepoint_marker_at (addr, marker);
746 }
747
748 std::vector<static_tracepoint_marker>
749 target_static_tracepoint_markers_by_strid (const char *marker_id)
750 {
751 target_ops *target = current_inferior ()->top_target ();
752
753 return target->static_tracepoint_markers_by_strid (marker_id);
754 }
755
756 traceframe_info_up
757 target_traceframe_info ()
758 {
759 return current_inferior ()->top_target ()->traceframe_info ();
760 }
761
762 bool
763 target_use_agent (bool use)
764 {
765 return current_inferior ()->top_target ()->use_agent (use);
766 }
767
768 bool
769 target_can_use_agent ()
770 {
771 return current_inferior ()->top_target ()->can_use_agent ();
772 }
773
774 bool
775 target_augmented_libraries_svr4_read ()
776 {
777 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
778 }
779
780 bool
781 target_supports_memory_tagging ()
782 {
783 return current_inferior ()->top_target ()->supports_memory_tagging ();
784 }
785
786 bool
787 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
788 int type)
789 {
790 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
791 }
792
793 bool
794 target_store_memtags (CORE_ADDR address, size_t len,
795 const gdb::byte_vector &tags, int type)
796 {
797 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
798 }
799
800 x86_xsave_layout
801 target_fetch_x86_xsave_layout ()
802 {
803 return current_inferior ()->top_target ()->fetch_x86_xsave_layout ();
804 }
805
806 void
807 target_log_command (const char *p)
808 {
809 return current_inferior ()->top_target ()->log_command (p);
810 }
811
812 /* This is used to implement the various target commands. */
813
814 static void
815 open_target (const char *args, int from_tty, struct cmd_list_element *command)
816 {
817 auto *ti = static_cast<target_info *> (command->context ());
818 target_open_ftype *func = target_factories[ti];
819
820 if (targetdebug)
821 gdb_printf (gdb_stdlog, "-> %s->open (...)\n",
822 ti->shortname);
823
824 func (args, from_tty);
825
826 if (targetdebug)
827 gdb_printf (gdb_stdlog, "<- %s->open (%s, %d)\n",
828 ti->shortname, args, from_tty);
829 }
830
831 /* See target.h. */
832
833 void
834 add_target (const target_info &t, target_open_ftype *func,
835 completer_ftype *completer)
836 {
837 struct cmd_list_element *c;
838
839 auto &func_slot = target_factories[&t];
840 if (func_slot != nullptr)
841 internal_error (_("target already added (\"%s\")."), t.shortname);
842 func_slot = func;
843
844 if (targetlist == NULL)
845 add_basic_prefix_cmd ("target", class_run, _("\
846 Connect to a target machine or process.\n\
847 The first argument is the type or protocol of the target machine.\n\
848 Remaining arguments are interpreted by the target protocol. For more\n\
849 information on the arguments for a particular protocol, type\n\
850 `help target ' followed by the protocol name."),
851 &targetlist, 0, &cmdlist);
852 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
853 c->set_context ((void *) &t);
854 c->func = open_target;
855 if (completer != NULL)
856 set_cmd_completer (c, completer);
857 }
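/* Illustrative sketch (not part of GDB itself): a new target would
   normally register itself from its _initialize routine roughly along
   these lines.  The names "example_target_info", "example_target_open"
   and "_initialize_example_target" are invented for the example.

     static const target_info example_target_info = {
       "example",
       N_("Example target"),
       N_("Connect to an example target.\nUsage: target example")
     };

     static void
     example_target_open (const char *args, int from_tty)
     {
       // A real open routine would construct the target and push it
       // onto the current inferior's target stack.
       error (_("\"target example\" is only an illustration."));
     }

     void
     _initialize_example_target ()
     {
       add_target (example_target_info, example_target_open);
     }

   This registers a "target example" command; invoking it looks up the
   factory in TARGET_FACTORIES (via the command's context) and calls
   example_target_open.  */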
858
859 /* See target.h. */
860
861 void
862 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
863 {
864 struct cmd_list_element *c;
865
866 /* If we used add_alias_cmd here, we would not get the deprecated warning;
867 see PR cli/15104. */
868 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
869 c->func = open_target;
870 c->set_context ((void *) &tinfo);
871 gdb::unique_xmalloc_ptr<char> alt
872 = xstrprintf ("target %s", tinfo.shortname);
873 deprecate_cmd (c, alt.release ());
874 }
875
876 /* Stub functions */
877
878 void
879 target_kill (void)
880 {
881
882 /* If the commit_resume_state of the to-be-killed-inferior's process stratum
883 is true, and this inferior is the last live inferior with resumed threads
884 of that target, then we want to leave commit_resume_state to false, as the
885 target won't have any resumed threads anymore. We achieve this with
886 this scoped_disable_commit_resumed. On construction, it will set the flag
887 to false. On destruction, it will only set it to true if there are resumed
888 threads left. */
889 scoped_disable_commit_resumed disable ("killing");
890 current_inferior ()->top_target ()->kill ();
891 }
892
893 void
894 target_load (const char *arg, int from_tty)
895 {
896 target_dcache_invalidate ();
897 current_inferior ()->top_target ()->load (arg, from_tty);
898 }
899
900 /* Define it. */
901
902 target_terminal_state target_terminal::m_terminal_state
903 = target_terminal_state::is_ours;
904
905 /* See target/target.h. */
906
907 void
908 target_terminal::init (void)
909 {
910 current_inferior ()->top_target ()->terminal_init ();
911
912 m_terminal_state = target_terminal_state::is_ours;
913 }
914
915 /* See target/target.h. */
916
917 void
918 target_terminal::inferior (void)
919 {
920 struct ui *ui = current_ui;
921
922 /* A background resume (``run&'') should leave GDB in control of the
923 terminal. */
924 if (ui->prompt_state != PROMPT_BLOCKED)
925 return;
926
927 /* Since we always run the inferior in the main console (unless "set
928 inferior-tty" is in effect), when some UI other than the main one
929 calls target_terminal::inferior, then we leave the main UI's
930 terminal settings as is. */
931 if (ui != main_ui)
932 return;
933
934 /* If GDB is resuming the inferior in the foreground, install
935 inferior's terminal modes. */
936
937 struct inferior *inf = current_inferior ();
938
939 if (inf->terminal_state != target_terminal_state::is_inferior)
940 {
941 current_inferior ()->top_target ()->terminal_inferior ();
942 inf->terminal_state = target_terminal_state::is_inferior;
943 }
944
945 m_terminal_state = target_terminal_state::is_inferior;
946
947 /* If the user hit C-c before, pretend that it was hit right
948 here. */
949 if (check_quit_flag ())
950 target_pass_ctrlc ();
951 }
952
953 /* See target/target.h. */
954
955 void
956 target_terminal::restore_inferior (void)
957 {
958 struct ui *ui = current_ui;
959
960 /* See target_terminal::inferior(). */
961 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
962 return;
963
964 /* Restore the terminal settings of inferiors that were in the
965 foreground but are now ours_for_output due to a temporary
966 target_terminal::ours_for_output() call. */
967
968 {
969 scoped_restore_current_inferior restore_inferior;
970
971 for (::inferior *inf : all_inferiors ())
972 {
973 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
974 {
975 set_current_inferior (inf);
976 current_inferior ()->top_target ()->terminal_inferior ();
977 inf->terminal_state = target_terminal_state::is_inferior;
978 }
979 }
980 }
981
982 m_terminal_state = target_terminal_state::is_inferior;
983
984 /* If the user hit C-c before, pretend that it was hit right
985 here. */
986 if (check_quit_flag ())
987 target_pass_ctrlc ();
988 }
989
990 /* Switch terminal state to DESIRED_STATE, either is_ours, or
991 is_ours_for_output. */
992
993 static void
994 target_terminal_is_ours_kind (target_terminal_state desired_state)
995 {
996 scoped_restore_current_inferior restore_inferior;
997
998 /* Must do this in two passes. First, have all inferiors save the
999 current terminal settings. Then, after all inferiors have had a
1000 chance to safely save the terminal settings, restore GDB's
1001 terminal settings. */
1002
1003 for (inferior *inf : all_inferiors ())
1004 {
1005 if (inf->terminal_state == target_terminal_state::is_inferior)
1006 {
1007 set_current_inferior (inf);
1008 current_inferior ()->top_target ()->terminal_save_inferior ();
1009 }
1010 }
1011
1012 for (inferior *inf : all_inferiors ())
1013 {
1014 /* Note we don't check is_inferior here like above because we
1015 need to handle 'is_ours_for_output -> is_ours' too. Careful
1016 to never transition from 'is_ours' to 'is_ours_for_output',
1017 though. */
1018 if (inf->terminal_state != target_terminal_state::is_ours
1019 && inf->terminal_state != desired_state)
1020 {
1021 set_current_inferior (inf);
1022 if (desired_state == target_terminal_state::is_ours)
1023 current_inferior ()->top_target ()->terminal_ours ();
1024 else if (desired_state == target_terminal_state::is_ours_for_output)
1025 current_inferior ()->top_target ()->terminal_ours_for_output ();
1026 else
1027 gdb_assert_not_reached ("unhandled desired state");
1028 inf->terminal_state = desired_state;
1029 }
1030 }
1031 }
1032
1033 /* See target/target.h. */
1034
1035 void
1036 target_terminal::ours ()
1037 {
1038 struct ui *ui = current_ui;
1039
1040 /* See target_terminal::inferior. */
1041 if (ui != main_ui)
1042 return;
1043
1044 if (m_terminal_state == target_terminal_state::is_ours)
1045 return;
1046
1047 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1048 m_terminal_state = target_terminal_state::is_ours;
1049 }
1050
1051 /* See target/target.h. */
1052
1053 void
1054 target_terminal::ours_for_output ()
1055 {
1056 struct ui *ui = current_ui;
1057
1058 /* See target_terminal::inferior. */
1059 if (ui != main_ui)
1060 return;
1061
1062 if (!target_terminal::is_inferior ())
1063 return;
1064
1065 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1066 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1067 }
1068
1069 /* See target/target.h. */
1070
1071 void
1072 target_terminal::info (const char *arg, int from_tty)
1073 {
1074 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1075 }
1076
1077 /* See target.h. */
1078
1079 bool
1080 target_supports_terminal_ours (void)
1081 {
1082 /* The current top target is the target at the top of the target
1083 stack of the current inferior. While normally there's always an
1084 inferior, we must check for nullptr here because we can get here
1085 very early during startup, before the initial inferior is first
1086 created. */
1087 inferior *inf = current_inferior ();
1088
1089 if (inf == nullptr)
1090 return false;
1091 return inf->top_target ()->supports_terminal_ours ();
1092 }
1093
1094 static void
1095 tcomplain (void)
1096 {
1097 error (_("You can't do that when your target is `%s'"),
1098 current_inferior ()->top_target ()->shortname ());
1099 }
1100
1101 void
1102 noprocess (void)
1103 {
1104 error (_("You can't do that without a process to debug."));
1105 }
1106
1107 static void
1108 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1109 {
1110 gdb_printf (_("No saved terminal information.\n"));
1111 }
1112
1113 /* A default implementation for the to_get_ada_task_ptid target method.
1114
1115 This function builds the PTID by using both LWP and TID as part of
1116 the PTID lwp and tid elements. The pid used is the pid of the
1117 inferior_ptid. */
1118
1119 static ptid_t
1120 default_get_ada_task_ptid (struct target_ops *self, long lwp, ULONGEST tid)
1121 {
1122 return ptid_t (inferior_ptid.pid (), lwp, tid);
1123 }
1124
1125 static enum exec_direction_kind
1126 default_execution_direction (struct target_ops *self)
1127 {
1128 if (!target_can_execute_reverse ())
1129 return EXEC_FORWARD;
1130 else if (!target_can_async_p ())
1131 return EXEC_FORWARD;
1132 else
1133 gdb_assert_not_reached ("\
1134 to_execution_direction must be implemented for reverse async");
1135 }
1136
1137 /* See target.h. */
1138
1139 void
1140 target_ops_ref_policy::decref (target_ops *t)
1141 {
1142 t->decref ();
1143 if (t->refcount () == 0)
1144 {
1145 if (t->stratum () == process_stratum)
1146 connection_list_remove (as_process_stratum_target (t));
1147
1148 for (inferior *inf : all_inferiors ())
1149 gdb_assert (!inf->target_is_pushed (t));
1150
1151 fileio_handles_invalidate_target (t);
1152
1153 t->close ();
1154
1155 if (targetdebug)
1156 gdb_printf (gdb_stdlog, "closing target\n");
1157 }
1158 }
1159
1160 /* See target.h. */
1161
1162 void
1163 target_stack::push (target_ops *t)
1164 {
1165 /* We must create a new reference first. It is possible that T is
1166 already pushed on this target stack, in which case we will first
1167 unpush it below, before re-pushing it. If we don't increment the
1168 reference count now, then when we unpush it, we might end up deleting
1169 T, which is not good. */
1170 auto ref = target_ops_ref::new_reference (t);
1171
1172 strata stratum = t->stratum ();
1173
1174 /* If there's already a target at this stratum, remove it. */
1175
1176 if (m_stack[stratum].get () != nullptr)
1177 unpush (m_stack[stratum].get ());
1178
1179 /* Now add the new one. */
1180 m_stack[stratum] = std::move (ref);
1181
1182 if (m_top < stratum)
1183 m_top = stratum;
1184
1185 if (stratum == process_stratum)
1186 connection_list_add (as_process_stratum_target (t));
1187 }
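/* Illustrative sketch (hypothetical "example_target" class): a target's
   open routine typically ends up here via inferior::push_target, e.g.

     target_ops_up target_holder (new example_target ());
     current_inferior ()->push_target (std::move (target_holder));

   push () takes its own reference first, drops any target already
   installed at the same stratum, and only then records the new one, so
   re-pushing an already-pushed target is safe.  */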
1188
1189 /* See target.h. */
1190
1191 bool
1192 target_stack::unpush (target_ops *t)
1193 {
1194 gdb_assert (t != NULL);
1195
1196 strata stratum = t->stratum ();
1197
1198 if (stratum == dummy_stratum)
1199 internal_error (_("Attempt to unpush the dummy target"));
1200
1201 /* Look for the specified target. Note that a target can only occur
1202 once in the target stack. */
1203
1204 if (m_stack[stratum] != t)
1205 {
1206 /* If T wasn't pushed, quit. Only open targets should be
1207 closed. */
1208 return false;
1209 }
1210
1211 if (m_top == stratum)
1212 m_top = this->find_beneath (t)->stratum ();
1213
1214 /* Move the target reference off the target stack, this sets the pointer
1215 held in m_stack to nullptr, and places the reference in ref. When
1216 ref goes out of scope its reference count will be decremented, which
1217 might cause the target to close.
1218
1219 We have to do it this way, and not just set the value in m_stack to
1220 nullptr directly, because doing so would decrement the reference
1221 count first, which might close the target, and closing the target
1222 does a check that the target is not on any inferior's target stack. */
1223 auto ref = std::move (m_stack[stratum]);
1224
1225 return true;
1226 }
1227
1228 void
1229 target_unpusher::operator() (struct target_ops *ops) const
1230 {
1231 current_inferior ()->unpush_target (ops);
1232 }
1233
1234 /* Default implementation of to_get_thread_local_address. */
1235
1236 static void
1237 generic_tls_error (void)
1238 {
1239 throw_error (TLS_GENERIC_ERROR,
1240 _("Cannot find thread-local variables on this target"));
1241 }
1242
1243 /* Using the objfile specified in OBJFILE, find the address for the
1244 current thread's thread-local storage with offset OFFSET. */
1245 CORE_ADDR
1246 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1247 {
1248 volatile CORE_ADDR addr = 0;
1249 struct target_ops *target = current_inferior ()->top_target ();
1250 gdbarch *gdbarch = current_inferior ()->arch ();
1251
1252 /* If OBJFILE is a separate debug object file, look for the
1253 original object file. */
1254 if (objfile->separate_debug_objfile_backlink != NULL)
1255 objfile = objfile->separate_debug_objfile_backlink;
1256
1257 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1258 {
1259 ptid_t ptid = inferior_ptid;
1260
1261 try
1262 {
1263 CORE_ADDR lm_addr;
1264
1265 /* Fetch the load module address for this objfile. */
1266 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1267 objfile);
1268
1269 if (gdbarch_get_thread_local_address_p (gdbarch))
1270 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1271 offset);
1272 else
1273 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1274 }
1275 /* If an error occurred, print TLS related messages here. Otherwise,
1276 throw the error to some higher catcher. */
1277 catch (const gdb_exception &ex)
1278 {
1279 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1280
1281 switch (ex.error)
1282 {
1283 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1284 error (_("Cannot find thread-local variables "
1285 "in this thread library."));
1286 break;
1287 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1288 if (objfile_is_library)
1289 error (_("Cannot find shared library `%s' in dynamic"
1290 " linker's load module list"), objfile_name (objfile));
1291 else
1292 error (_("Cannot find executable file `%s' in dynamic"
1293 " linker's load module list"), objfile_name (objfile));
1294 break;
1295 case TLS_NOT_ALLOCATED_YET_ERROR:
1296 if (objfile_is_library)
1297 error (_("The inferior has not yet allocated storage for"
1298 " thread-local variables in\n"
1299 "the shared library `%s'\n"
1300 "for %s"),
1301 objfile_name (objfile),
1302 target_pid_to_str (ptid).c_str ());
1303 else
1304 error (_("The inferior has not yet allocated storage for"
1305 " thread-local variables in\n"
1306 "the executable `%s'\n"
1307 "for %s"),
1308 objfile_name (objfile),
1309 target_pid_to_str (ptid).c_str ());
1310 break;
1311 case TLS_GENERIC_ERROR:
1312 if (objfile_is_library)
1313 error (_("Cannot find thread-local storage for %s, "
1314 "shared library %s:\n%s"),
1315 target_pid_to_str (ptid).c_str (),
1316 objfile_name (objfile), ex.what ());
1317 else
1318 error (_("Cannot find thread-local storage for %s, "
1319 "executable file %s:\n%s"),
1320 target_pid_to_str (ptid).c_str (),
1321 objfile_name (objfile), ex.what ());
1322 break;
1323 default:
1324 throw;
1325 break;
1326 }
1327 }
1328 }
1329 else
1330 error (_("Cannot find thread-local variables on this target"));
1331
1332 return addr;
1333 }
1334
1335 const char *
1336 target_xfer_status_to_string (enum target_xfer_status status)
1337 {
1338 #define CASE(X) case X: return #X
1339 switch (status)
1340 {
1341 CASE(TARGET_XFER_E_IO);
1342 CASE(TARGET_XFER_UNAVAILABLE);
1343 default:
1344 return "<unknown>";
1345 }
1346 #undef CASE
1347 };
1348
1349
1350 const std::vector<target_section> *
1351 target_get_section_table (struct target_ops *target)
1352 {
1353 return target->get_section_table ();
1354 }
1355
1356 /* Find a section containing ADDR. */
1357
1358 const struct target_section *
1359 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1360 {
1361 const std::vector<target_section> *table = target_get_section_table (target);
1362
1363 if (table == NULL)
1364 return NULL;
1365
1366 for (const target_section &secp : *table)
1367 {
1368 if (addr >= secp.addr && addr < secp.endaddr)
1369 return &secp;
1370 }
1371 return NULL;
1372 }
1373
1374 /* See target.h. */
1375
1376 const std::vector<target_section> *
1377 default_get_section_table ()
1378 {
1379 return &current_program_space->target_sections ();
1380 }
1381
1382 /* Helper for the memory xfer routines. Checks the attributes of the
1383 memory region of MEMADDR against the read or write being attempted.
1384 If the access is permitted returns true, otherwise returns false.
1385 REGION_P is an optional output parameter. If non-NULL, it is
1386 filled with a pointer to the memory region of MEMADDR. REG_LEN
1387 returns LEN trimmed to the end of the region. This is how much the
1388 caller can continue requesting, if the access is permitted. A
1389 single xfer request must not straddle memory region boundaries. */
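/* For example (illustrative numbers): with a region spanning
   [0x1000, 0x2000) and a request of LEN 0x100 at MEMADDR 0x1fc0, the
   access runs past the region's end, so *REG_LEN is trimmed to
   0x2000 - 0x1fc0 = 0x40; the caller may then retry the remaining
   bytes as a separate request against the next region.  */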
1390
1391 static int
1392 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1393 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1394 struct mem_region **region_p)
1395 {
1396 struct mem_region *region;
1397
1398 region = lookup_mem_region (memaddr);
1399
1400 if (region_p != NULL)
1401 *region_p = region;
1402
1403 switch (region->attrib.mode)
1404 {
1405 case MEM_RO:
1406 if (writebuf != NULL)
1407 return 0;
1408 break;
1409
1410 case MEM_WO:
1411 if (readbuf != NULL)
1412 return 0;
1413 break;
1414
1415 case MEM_FLASH:
1416 /* We only support writing to flash during "load" for now. */
1417 if (writebuf != NULL)
1418 error (_("Writing to flash memory forbidden in this context"));
1419 break;
1420
1421 case MEM_NONE:
1422 return 0;
1423 }
1424
1425 /* region->hi == 0 means there's no upper bound. */
1426 if (memaddr + len < region->hi || region->hi == 0)
1427 *reg_len = len;
1428 else
1429 *reg_len = region->hi - memaddr;
1430
1431 return 1;
1432 }
1433
1434 /* Read memory from more than one valid target. A core file, for
1435 instance, could have some of memory but delegate other bits to
1436 the target below it. So, we must manually try all targets. */
1437
1438 enum target_xfer_status
1439 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1440 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1441 ULONGEST *xfered_len)
1442 {
1443 enum target_xfer_status res;
1444
1445 do
1446 {
1447 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1448 readbuf, writebuf, memaddr, len,
1449 xfered_len);
1450 if (res == TARGET_XFER_OK)
1451 break;
1452
1453 /* Stop if the target reports that the memory is not available. */
1454 if (res == TARGET_XFER_UNAVAILABLE)
1455 break;
1456
1457 /* Don't continue past targets which have all the memory.
1458 At one time, this code was necessary to read data from
1459 executables / shared libraries when data for the requested
1460 addresses weren't available in the core file. But now the
1461 core target handles this case itself. */
1462 if (ops->has_all_memory ())
1463 break;
1464
1465 ops = ops->beneath ();
1466 }
1467 while (ops != NULL);
1468
1469 /* The cache works at the raw memory level. Make sure the cache
1470 gets updated with raw contents no matter what kind of memory
1471 object was originally being written. Note we do write-through
1472 first, so that if it fails, we don't write to the cache contents
1473 that never made it to the target. */
1474 if (writebuf != NULL
1475 && inferior_ptid != null_ptid
1476 && target_dcache_init_p ()
1477 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1478 {
1479 DCACHE *dcache = target_dcache_get ();
1480
1481 /* Note that writing to an area of memory which wasn't present
1482 in the cache doesn't cause it to be loaded in. */
1483 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1484 }
1485
1486 return res;
1487 }
1488
1489 /* Perform a partial memory transfer.
1490 For docs see target.h, to_xfer_partial. */
1491
1492 static enum target_xfer_status
1493 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1494 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1495 ULONGEST len, ULONGEST *xfered_len)
1496 {
1497 enum target_xfer_status res;
1498 ULONGEST reg_len;
1499 struct mem_region *region;
1500 struct inferior *inf;
1501
1502 /* For accesses to unmapped overlay sections, read directly from
1503 files. Must do this first, as MEMADDR may need adjustment. */
1504 if (readbuf != NULL && overlay_debugging)
1505 {
1506 struct obj_section *section = find_pc_overlay (memaddr);
1507
1508 if (pc_in_unmapped_range (memaddr, section))
1509 {
1510 const std::vector<target_section> *table = target_get_section_table (ops);
1511 const char *section_name = section->the_bfd_section->name;
1512
1513 memaddr = overlay_mapped_address (memaddr, section);
1514
1515 auto match_cb = [=] (const struct target_section *s)
1516 {
1517 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1518 };
1519
1520 return section_table_xfer_memory_partial (readbuf, writebuf,
1521 memaddr, len, xfered_len,
1522 *table, match_cb);
1523 }
1524 }
1525
1526 /* Try the executable files, if "trust-readonly-sections" is set. */
1527 if (readbuf != NULL && trust_readonly)
1528 {
1529 const struct target_section *secp
1530 = target_section_by_addr (ops, memaddr);
1531 if (secp != NULL
1532 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1533 {
1534 const std::vector<target_section> *table = target_get_section_table (ops);
1535 return section_table_xfer_memory_partial (readbuf, writebuf,
1536 memaddr, len, xfered_len,
1537 *table);
1538 }
1539 }
1540
1541 /* Try GDB's internal data cache. */
1542
1543 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1544 &region))
1545 return TARGET_XFER_E_IO;
1546
1547 if (inferior_ptid != null_ptid)
1548 inf = current_inferior ();
1549 else
1550 inf = NULL;
1551
1552 if (inf != NULL
1553 && readbuf != NULL
1554 /* The dcache reads whole cache lines; that doesn't play well
1555 with reading from a trace buffer, because reading outside of
1556 the collected memory range fails. */
1557 && get_traceframe_number () == -1
1558 && (region->attrib.cache
1559 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1560 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1561 {
1562 DCACHE *dcache = target_dcache_get_or_init ();
1563
1564 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1565 reg_len, xfered_len);
1566 }
1567
1568 /* If none of those methods found the memory we wanted, fall back
1569 to a target partial transfer. Normally a single call to
1570 to_xfer_partial is enough; if it doesn't recognize an object
1571 it will call the to_xfer_partial of the next target down.
1572 But for memory this won't do. Memory is the only target
1573 object which can be read from more than one valid target.
1574 A core file, for instance, could have some of memory but
1575 delegate other bits to the target below it. So, we must
1576 manually try all targets. */
1577
1578 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1579 xfered_len);
1580
1581 /* If we still haven't got anything, return the last error. We
1582 give up. */
1583 return res;
1584 }
1585
1586 /* Perform a partial memory transfer. For docs see target.h,
1587 to_xfer_partial. */
1588
1589 static enum target_xfer_status
1590 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1591 gdb_byte *readbuf, const gdb_byte *writebuf,
1592 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1593 {
1594 enum target_xfer_status res;
1595
1596 /* Zero length requests are ok and require no work. */
1597 if (len == 0)
1598 return TARGET_XFER_EOF;
1599
1600 memaddr = gdbarch_remove_non_address_bits (current_inferior ()->arch (),
1601 memaddr);
1602
1603 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1604 breakpoint insns, thus hiding out from higher layers whether
1605 there are software breakpoints inserted in the code stream. */
1606 if (readbuf != NULL)
1607 {
1608 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1609 xfered_len);
1610
1611 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1612 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1613 }
1614 else
1615 {
1616 /* A large write request is likely to be partially satisfied
1617 by memory_xfer_partial_1. We will continually malloc
1618 and free a copy of the entire write request for breakpoint
1619 shadow handling even though we only end up writing a small
1620 subset of it. Cap writes to a limit specified by the target
1621 to mitigate this. */
1622 len = std::min (ops->get_memory_xfer_limit (), len);
1623
1624 gdb::byte_vector buf (writebuf, writebuf + len);
1625 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1626 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1627 xfered_len);
1628 }
1629
1630 return res;
1631 }
1632
1633 scoped_restore_tmpl<int>
1634 make_scoped_restore_show_memory_breakpoints (int show)
1635 {
1636 return make_scoped_restore (&show_memory_breakpoints, show);
1637 }
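/* Illustrative use (hypothetical caller; ADDR and BUF are assumed to
   exist): read memory with software breakpoint instructions left
   visible instead of the shadowed original bytes, restoring the old
   setting when the scope exits.

     {
       scoped_restore restore_show
         = make_scoped_restore_show_memory_breakpoints (1);

       target_read_memory (addr, buf, sizeof buf);
     }
*/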
1638
1639 /* For docs see target.h, to_xfer_partial. */
1640
1641 enum target_xfer_status
1642 target_xfer_partial (struct target_ops *ops,
1643 enum target_object object, const char *annex,
1644 gdb_byte *readbuf, const gdb_byte *writebuf,
1645 ULONGEST offset, ULONGEST len,
1646 ULONGEST *xfered_len)
1647 {
1648 enum target_xfer_status retval;
1649
1650 /* Transfer is done when LEN is zero. */
1651 if (len == 0)
1652 return TARGET_XFER_EOF;
1653
1654 if (writebuf && !may_write_memory)
1655 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1656 core_addr_to_string_nz (offset), plongest (len));
1657
1658 *xfered_len = 0;
1659
1660 /* If this is a memory transfer, let the memory-specific code
1661 have a look at it instead. Memory transfers are more
1662 complicated. */
1663 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1664 || object == TARGET_OBJECT_CODE_MEMORY)
1665 retval = memory_xfer_partial (ops, object, readbuf,
1666 writebuf, offset, len, xfered_len);
1667 else if (object == TARGET_OBJECT_RAW_MEMORY)
1668 {
1669 /* Skip/avoid accessing the target if the memory region
1670 attributes block the access. Check this here instead of in
1671 raw_memory_xfer_partial as otherwise we'd end up checking
1672 this twice in the case the memory_xfer_partial path is
1673 taken; once before checking the dcache, and again in the
1674 tail call to raw_memory_xfer_partial. */
1675 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1676 NULL))
1677 return TARGET_XFER_E_IO;
1678
1679 /* Request the normal memory object from other layers. */
1680 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1681 xfered_len);
1682 }
1683 else
1684 retval = ops->xfer_partial (object, annex, readbuf,
1685 writebuf, offset, len, xfered_len);
1686
1687 if (targetdebug)
1688 {
1689 const unsigned char *myaddr = NULL;
1690
1691 gdb_printf (gdb_stdlog,
1692 "%s:target_xfer_partial "
1693 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1694 ops->shortname (),
1695 (int) object,
1696 (annex ? annex : "(null)"),
1697 host_address_to_string (readbuf),
1698 host_address_to_string (writebuf),
1699 core_addr_to_string_nz (offset),
1700 pulongest (len), retval,
1701 pulongest (*xfered_len));
1702
1703 if (readbuf)
1704 myaddr = readbuf;
1705 if (writebuf)
1706 myaddr = writebuf;
1707 if (retval == TARGET_XFER_OK && myaddr != NULL)
1708 {
1709 int i;
1710
1711 gdb_puts (", bytes =", gdb_stdlog);
1712 for (i = 0; i < *xfered_len; i++)
1713 {
1714 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1715 {
1716 if (targetdebug < 2 && i > 0)
1717 {
1718 gdb_printf (gdb_stdlog, " ...");
1719 break;
1720 }
1721 gdb_printf (gdb_stdlog, "\n");
1722 }
1723
1724 gdb_printf (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1725 }
1726 }
1727
1728 gdb_putc ('\n', gdb_stdlog);
1729 }
1730
1731 /* Check implementations of to_xfer_partial update *XFERED_LEN
1732 properly. Do assertion after printing debug messages, so that we
1733 can find more clues on assertion failure from debugging messages. */
1734 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1735 gdb_assert (*xfered_len > 0);
1736
1737 return retval;
1738 }
1739
1740 /* Read LEN bytes of target memory at address MEMADDR, placing the
1741 results in GDB's memory at MYADDR. Returns either 0 for success or
1742 -1 if any error occurs.
1743
1744 If an error occurs, no guarantee is made about the contents of the data at
1745 MYADDR. In particular, the caller should not depend upon partial reads
1746 filling the buffer with good data. There is no way for the caller to know
1747 how much good data might have been transferred anyway. Callers that can
1748 deal with partial reads should call target_read (which will retry until
1749 it makes no progress, and then return how much was transferred). */
1750
1751 int
1752 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1753 {
1754 if (target_read (current_inferior ()->top_target (),
1755 TARGET_OBJECT_MEMORY, NULL,
1756 myaddr, memaddr, len) == len)
1757 return 0;
1758 else
1759 return -1;
1760 }
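/* Illustrative contrast (hypothetical caller; ADDR is an assumed
   CORE_ADDR): target_read_memory is all-or-nothing, while callers that
   can cope with partial reads use target_read directly.

     gdb_byte buf[64];

     // All-or-nothing: on any failure -1 is returned and BUF's
     // contents must not be relied upon.
     if (target_read_memory (addr, buf, sizeof buf) != 0)
       error (_("cannot read memory at %s"), core_addr_to_string_nz (addr));

     // Partial-read tolerant: target_read retries until it makes no
     // progress and reports how many units were actually transferred.
     LONGEST xfered = target_read (current_inferior ()->top_target (),
                                   TARGET_OBJECT_MEMORY, NULL,
                                   buf, addr, sizeof buf);
*/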
1761
1762 /* See target/target.h. */
1763
1764 int
1765 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1766 {
1767 gdb_byte buf[4];
1768 int r;
1769
1770 r = target_read_memory (memaddr, buf, sizeof buf);
1771 if (r != 0)
1772 return r;
1773 *result = extract_unsigned_integer
1774 (buf, sizeof buf,
1775 gdbarch_byte_order (current_inferior ()->arch ()));
1776 return 0;
1777 }
1778
1779 /* Like target_read_memory, but specify explicitly that this is a read
1780 from the target's raw memory. That is, this read bypasses the
1781 dcache, breakpoint shadowing, etc. */
1782
1783 int
1784 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1785 {
1786 if (target_read (current_inferior ()->top_target (),
1787 TARGET_OBJECT_RAW_MEMORY, NULL,
1788 myaddr, memaddr, len) == len)
1789 return 0;
1790 else
1791 return -1;
1792 }
1793
1794 /* Like target_read_memory, but specify explicitly that this is a read from
1795 the target's stack. This may trigger different cache behavior. */
1796
1797 int
1798 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1799 {
1800 if (target_read (current_inferior ()->top_target (),
1801 TARGET_OBJECT_STACK_MEMORY, NULL,
1802 myaddr, memaddr, len) == len)
1803 return 0;
1804 else
1805 return -1;
1806 }
1807
1808 /* Like target_read_memory, but specify explicitly that this is a read from
1809 the target's code. This may trigger different cache behavior. */
1810
1811 int
1812 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1813 {
1814 if (target_read (current_inferior ()->top_target (),
1815 TARGET_OBJECT_CODE_MEMORY, NULL,
1816 myaddr, memaddr, len) == len)
1817 return 0;
1818 else
1819 return -1;
1820 }
1821
1822 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1823 Returns either 0 for success or -1 if any error occurs. If an
1824 error occurs, no guarantee is made about how much data got written.
1825 Callers that can deal with partial writes should call
1826 target_write. */
1827
1828 int
1829 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1830 {
1831 if (target_write (current_inferior ()->top_target (),
1832 TARGET_OBJECT_MEMORY, NULL,
1833 myaddr, memaddr, len) == len)
1834 return 0;
1835 else
1836 return -1;
1837 }
1838
1839 /* Write LEN bytes from MYADDR to target raw memory at address
1840 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1841 If an error occurs, no guarantee is made about how much data got
1842 written. Callers that can deal with partial writes should call
1843 target_write. */
1844
1845 int
1846 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1847 {
1848 if (target_write (current_inferior ()->top_target (),
1849 TARGET_OBJECT_RAW_MEMORY, NULL,
1850 myaddr, memaddr, len) == len)
1851 return 0;
1852 else
1853 return -1;
1854 }
1855
1856 /* Fetch the target's memory map. */
1857
1858 std::vector<mem_region>
1859 target_memory_map (void)
1860 {
1861 target_ops *target = current_inferior ()->top_target ();
1862 std::vector<mem_region> result = target->memory_map ();
1863 if (result.empty ())
1864 return result;
1865
1866 std::sort (result.begin (), result.end ());
1867
1868 /* Check that regions do not overlap. Simultaneously assign
1869 a numbering for the "mem" commands to use to refer to
1870 each region. */
1871 mem_region *last_one = NULL;
1872 for (size_t ix = 0; ix < result.size (); ix++)
1873 {
1874 mem_region *this_one = &result[ix];
1875 this_one->number = ix;
1876
1877 if (last_one != NULL && last_one->hi > this_one->lo)
1878 {
1879 warning (_("Overlapping regions in memory map: ignoring"));
1880 return std::vector<mem_region> ();
1881 }
1882
1883 last_one = this_one;
1884 }
1885
1886 return result;
1887 }
1888
1889 void
1890 target_flash_erase (ULONGEST address, LONGEST length)
1891 {
1892 current_inferior ()->top_target ()->flash_erase (address, length);
1893 }
1894
1895 void
1896 target_flash_done (void)
1897 {
1898 current_inferior ()->top_target ()->flash_done ();
1899 }
1900
1901 static void
1902 show_trust_readonly (struct ui_file *file, int from_tty,
1903 struct cmd_list_element *c, const char *value)
1904 {
1905 gdb_printf (file,
1906 _("Mode for reading from readonly sections is %s.\n"),
1907 value);
1908 }
1909
1910 /* Target vector read/write partial wrapper functions. */
1911
1912 static enum target_xfer_status
1913 target_read_partial (struct target_ops *ops,
1914 enum target_object object,
1915 const char *annex, gdb_byte *buf,
1916 ULONGEST offset, ULONGEST len,
1917 ULONGEST *xfered_len)
1918 {
1919 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1920 xfered_len);
1921 }
1922
1923 static enum target_xfer_status
1924 target_write_partial (struct target_ops *ops,
1925 enum target_object object,
1926 const char *annex, const gdb_byte *buf,
1927 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1928 {
1929 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1930 xfered_len);
1931 }
1932
1933 /* Wrappers to perform the full transfer. */
1934
1935 /* For docs on target_read see target.h. */
1936
1937 LONGEST
1938 target_read (struct target_ops *ops,
1939 enum target_object object,
1940 const char *annex, gdb_byte *buf,
1941 ULONGEST offset, LONGEST len)
1942 {
1943 LONGEST xfered_total = 0;
1944 int unit_size = 1;
1945
1946 /* If we are reading from a memory object, find the length of an addressable
1947 unit for that architecture. */
1948 if (object == TARGET_OBJECT_MEMORY
1949 || object == TARGET_OBJECT_STACK_MEMORY
1950 || object == TARGET_OBJECT_CODE_MEMORY
1951 || object == TARGET_OBJECT_RAW_MEMORY)
1952 unit_size = gdbarch_addressable_memory_unit_size
1953 (current_inferior ()->arch ());
1954
1955 while (xfered_total < len)
1956 {
1957 ULONGEST xfered_partial;
1958 enum target_xfer_status status;
1959
1960 status = target_read_partial (ops, object, annex,
1961 buf + xfered_total * unit_size,
1962 offset + xfered_total, len - xfered_total,
1963 &xfered_partial);
1964
1965 /* Call an observer, notifying them of the xfer progress? */
1966 if (status == TARGET_XFER_EOF)
1967 return xfered_total;
1968 else if (status == TARGET_XFER_OK)
1969 {
1970 xfered_total += xfered_partial;
1971 QUIT;
1972 }
1973 else
1974 return TARGET_XFER_E_IO;
1975
1976 }
1977 return len;
1978 }
1979
1980 /* Assuming that the entire [begin, end) range of memory cannot be
1981 read, try to read whatever subrange is possible to read.
1982
1983 The function returns, in RESULT, either zero or one memory block.
1984 If there's a readable subrange at the beginning, it is completely
1985 read and returned. Any further readable subrange will not be read.
1986 Otherwise, if there's a readable subrange at the end, it will be
1987 completely read and returned. Any readable subranges before it
1988 (obviously, not starting at the beginning), will be ignored. In
1989 other cases -- either no readable subrange, or readable subrange(s)
1990 that is neither at the beginning, or end, nothing is returned.
1991
1992 The purpose of this function is to handle a read across a boundary
1993 of accessible memory in a case when memory map is not available.
1994 The above restrictions are fine for this case, but will give
1995 incorrect results if the memory is 'patchy'. However, supporting
1996 'patchy' memory would require trying to read every single byte,
1997 and that seems an unacceptable solution. An explicit memory map is
1998 recommended for this case -- read_memory_robust will then take
1999 care of reading multiple ranges. */
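
/* For illustration (a hypothetical layout, not a requirement of the
   interface): suppose [0x1000, 0x2000) is requested, [0x1000, 0x1800)
   is readable and [0x1800, 0x2000) is not.  The first byte reads fine,
   so the bisection below walks the boundary upwards and a single block
   covering [0x1000, 0x1800) is returned.  A readable island strictly
   inside the unreadable tail would be ignored, as described above.  */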
2000
2001 static void
2002 read_whatever_is_readable (struct target_ops *ops,
2003 const ULONGEST begin, const ULONGEST end,
2004 int unit_size,
2005 std::vector<memory_read_result> *result)
2006 {
2007 ULONGEST current_begin = begin;
2008 ULONGEST current_end = end;
2009 int forward;
2010 ULONGEST xfered_len;
2011
2012 /* If we previously failed to read 1 byte, nothing can be done here. */
2013 if (end - begin <= 1)
2014 return;
2015
2016 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2017
2018 /* Check that either the first or the last byte is readable, and give
2019 up if not. This heuristic is meant to permit reading accessible
2020 memory at the boundary of an accessible region. */
2021 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2022 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2023 {
2024 forward = 1;
2025 ++current_begin;
2026 }
2027 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2028 buf.get () + (end - begin) - 1, end - 1, 1,
2029 &xfered_len) == TARGET_XFER_OK)
2030 {
2031 forward = 0;
2032 --current_end;
2033 }
2034 else
2035 return;
2036
2037 /* The loop invariant is that the range [current_begin, current_end) was
2038 previously found to be not readable as a whole.
2039
2040 Note the loop condition -- once the range is down to 1 byte, it cannot
2041 be divided further, so there is no point in trying. */
2042 while (current_end - current_begin > 1)
2043 {
2044 ULONGEST first_half_begin, first_half_end;
2045 ULONGEST second_half_begin, second_half_end;
2046 LONGEST xfer;
2047 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2048
2049 if (forward)
2050 {
2051 first_half_begin = current_begin;
2052 first_half_end = middle;
2053 second_half_begin = middle;
2054 second_half_end = current_end;
2055 }
2056 else
2057 {
2058 first_half_begin = middle;
2059 first_half_end = current_end;
2060 second_half_begin = current_begin;
2061 second_half_end = middle;
2062 }
2063
2064 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2065 buf.get () + (first_half_begin - begin) * unit_size,
2066 first_half_begin,
2067 first_half_end - first_half_begin);
2068
2069 if (xfer == first_half_end - first_half_begin)
2070 {
2071 /* This half reads up fine. So, the error must be in the
2072 other half. */
2073 current_begin = second_half_begin;
2074 current_end = second_half_end;
2075 }
2076 else
2077 {
2078 /* This half is not readable. Because we've tried one byte, we
2079 know some part of this half is actually readable. Go to the next
2080 iteration to divide again and try to read.
2081
2082 We don't handle the other half, because this function only tries
2083 to read a single readable subrange. */
2084 current_begin = first_half_begin;
2085 current_end = first_half_end;
2086 }
2087 }
2088
2089 if (forward)
2090 {
2091 /* The [begin, current_begin) range has been read. */
2092 result->emplace_back (begin, current_begin, std::move (buf));
2093 }
2094 else
2095 {
2096 /* The [current_end, end) range has been read. */
2097 LONGEST region_len = end - current_end;
2098
2099 gdb::unique_xmalloc_ptr<gdb_byte> data
2100 ((gdb_byte *) xmalloc (region_len * unit_size));
2101 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2102 region_len * unit_size);
2103 result->emplace_back (current_end, end, std::move (data));
2104 }
2105 }
2106
2107 std::vector<memory_read_result>
2108 read_memory_robust (struct target_ops *ops,
2109 const ULONGEST offset, const LONGEST len)
2110 {
2111 std::vector<memory_read_result> result;
2112 int unit_size
2113 = gdbarch_addressable_memory_unit_size (current_inferior ()->arch ());
2114
2115 LONGEST xfered_total = 0;
2116 while (xfered_total < len)
2117 {
2118 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2119 LONGEST region_len;
2120
2121 /* If there is no explicit region, a fake one should be created. */
2122 gdb_assert (region);
2123
2124 if (region->hi == 0)
2125 region_len = len - xfered_total;
2126 else
2127 region_len = region->hi - offset;
2128
2129 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2130 {
2131 /* Cannot read this region. Note that we can end up here only
2132 if the region is explicitly marked inaccessible, or
2133 'inaccessible-by-default' is in effect. */
2134 xfered_total += region_len;
2135 }
2136 else
2137 {
2138 LONGEST to_read = std::min (len - xfered_total, region_len);
2139 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2140 ((gdb_byte *) xmalloc (to_read * unit_size));
2141
2142 LONGEST xfered_partial =
2143 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2144 offset + xfered_total, to_read);
2145 /* Call an observer, notifying them of the xfer progress? */
2146 if (xfered_partial <= 0)
2147 {
2148 /* Got an error reading the full chunk. See if we can read
2149 some subrange instead. */
2150 read_whatever_is_readable (ops, offset + xfered_total,
2151 offset + xfered_total + to_read,
2152 unit_size, &result);
2153 xfered_total += to_read;
2154 }
2155 else
2156 {
2157 result.emplace_back (offset + xfered_total,
2158 offset + xfered_total + xfered_partial,
2159 std::move (buffer));
2160 xfered_total += xfered_partial;
2161 }
2162 QUIT;
2163 }
2164 }
2165
2166 return result;
2167 }
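
/* Illustrative consumption of the result (a hypothetical caller; the
   formatting helpers are only an example): report which subranges of
   the request were actually readable:

     for (const memory_read_result &r : read_memory_robust (ops, addr, len))
       gdb_printf ("[%s, %s) readable\n",
                   hex_string (r.begin), hex_string (r.end));
*/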
2168
2169
2170 /* An alternative to target_write with progress callbacks. */
2171
2172 LONGEST
2173 target_write_with_progress (struct target_ops *ops,
2174 enum target_object object,
2175 const char *annex, const gdb_byte *buf,
2176 ULONGEST offset, LONGEST len,
2177 void (*progress) (ULONGEST, void *), void *baton)
2178 {
2179 LONGEST xfered_total = 0;
2180 int unit_size = 1;
2181
2182 /* If we are writing to a memory object, find the length of an addressable
2183 unit for that architecture. */
2184 if (object == TARGET_OBJECT_MEMORY
2185 || object == TARGET_OBJECT_STACK_MEMORY
2186 || object == TARGET_OBJECT_CODE_MEMORY
2187 || object == TARGET_OBJECT_RAW_MEMORY)
2188 unit_size = gdbarch_addressable_memory_unit_size
2189 (current_inferior ()->arch ());
2190
2191 /* Give the progress callback a chance to set up. */
2192 if (progress)
2193 (*progress) (0, baton);
2194
2195 while (xfered_total < len)
2196 {
2197 ULONGEST xfered_partial;
2198 enum target_xfer_status status;
2199
2200 status = target_write_partial (ops, object, annex,
2201 buf + xfered_total * unit_size,
2202 offset + xfered_total, len - xfered_total,
2203 &xfered_partial);
2204
2205 if (status != TARGET_XFER_OK)
2206 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2207
2208 if (progress)
2209 (*progress) (xfered_partial, baton);
2210
2211 xfered_total += xfered_partial;
2212 QUIT;
2213 }
2214 return len;
2215 }
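
/* A minimal sketch of a progress callback (COUNT_PROGRESS is a
   hypothetical helper, not defined in this file): accumulate the number
   of units written so far.  Note the callback is invoked once with 0
   before any data is transferred, so that first call leaves the count
   unchanged:

     static void
     count_progress (ULONGEST written, void *baton)
     {
       *(ULONGEST *) baton += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_FLASH, nullptr,
                                 buf, offset, len,
                                 count_progress, &total);
*/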
2216
2217 /* For docs on target_write see target.h. */
2218
2219 LONGEST
2220 target_write (struct target_ops *ops,
2221 enum target_object object,
2222 const char *annex, const gdb_byte *buf,
2223 ULONGEST offset, LONGEST len)
2224 {
2225 return target_write_with_progress (ops, object, annex, buf, offset, len,
2226 NULL, NULL);
2227 }
2228
2229 /* Help for target_read_alloc and target_read_stralloc. See their comments
2230 for details. */
2231
2232 template <typename T>
2233 gdb::optional<gdb::def_vector<T>>
2234 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2235 const char *annex)
2236 {
2237 gdb::def_vector<T> buf;
2238 size_t buf_pos = 0;
2239 const int chunk = 4096;
2240
2241 /* This function does not have a length parameter; it reads the
2242 entire OBJECT. Also, it doesn't support objects fetched partly
2243 from one target and partly from another (in a different stratum,
2244 e.g. a core file and an executable). Both reasons make it
2245 unsuitable for reading memory. */
2246 gdb_assert (object != TARGET_OBJECT_MEMORY);
2247
2248 /* Start by reading up to 4K at a time. The target will throttle
2249 this number down if necessary. */
2250 while (1)
2251 {
2252 ULONGEST xfered_len;
2253 enum target_xfer_status status;
2254
2255 buf.resize (buf_pos + chunk);
2256
2257 status = target_read_partial (ops, object, annex,
2258 (gdb_byte *) &buf[buf_pos],
2259 buf_pos, chunk,
2260 &xfered_len);
2261
2262 if (status == TARGET_XFER_EOF)
2263 {
2264 /* Read all there was. */
2265 buf.resize (buf_pos);
2266 return buf;
2267 }
2268 else if (status != TARGET_XFER_OK)
2269 {
2270 /* An error occurred. */
2271 return {};
2272 }
2273
2274 buf_pos += xfered_len;
2275
2276 QUIT;
2277 }
2278 }
2279
2280 /* See target.h. */
2281
2282 gdb::optional<gdb::byte_vector>
2283 target_read_alloc (struct target_ops *ops, enum target_object object,
2284 const char *annex)
2285 {
2286 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2287 }
2288
2289 /* See target.h. */
2290
2291 gdb::optional<gdb::char_vector>
2292 target_read_stralloc (struct target_ops *ops, enum target_object object,
2293 const char *annex)
2294 {
2295 gdb::optional<gdb::char_vector> buf
2296 = target_read_alloc_1<char> (ops, object, annex);
2297
2298 if (!buf)
2299 return {};
2300
2301 if (buf->empty () || buf->back () != '\0')
2302 buf->push_back ('\0');
2303
2304 /* Check for embedded NUL bytes; but allow trailing NULs. */
2305 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2306 it != buf->end (); it++)
2307 if (*it != '\0')
2308 {
2309 warning (_("target object %d, annex %s, "
2310 "contained unexpected null characters"),
2311 (int) object, annex ? annex : "(none)");
2312 break;
2313 }
2314
2315 return buf;
2316 }
2317
2318 /* Memory transfer methods. */
2319
2320 void
2321 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2322 LONGEST len)
2323 {
2324 /* This method is used to read from an alternate, non-current
2325 target. This read must bypass the overlay support (as symbols
2326 don't match this target), and GDB's internal cache (wrong cache
2327 for this target). */
2328 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2329 != len)
2330 memory_error (TARGET_XFER_E_IO, addr);
2331 }
2332
2333 ULONGEST
2334 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2335 int len, enum bfd_endian byte_order)
2336 {
2337 gdb_byte buf[sizeof (ULONGEST)];
2338
2339 gdb_assert (len <= sizeof (buf));
2340 get_target_memory (ops, addr, buf, len);
2341 return extract_unsigned_integer (buf, len, byte_order);
2342 }
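
/* For example (illustrative only; OPS and ADDR are hypothetical),
   fetching a 4-byte big-endian value from an auxiliary target:

     ULONGEST val
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);
*/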
2343
2344 /* See target.h. */
2345
2346 int
2347 target_insert_breakpoint (struct gdbarch *gdbarch,
2348 struct bp_target_info *bp_tgt)
2349 {
2350 if (!may_insert_breakpoints)
2351 {
2352 warning (_("May not insert breakpoints"));
2353 return 1;
2354 }
2355
2356 target_ops *target = current_inferior ()->top_target ();
2357
2358 return target->insert_breakpoint (gdbarch, bp_tgt);
2359 }
2360
2361 /* See target.h. */
2362
2363 int
2364 target_remove_breakpoint (struct gdbarch *gdbarch,
2365 struct bp_target_info *bp_tgt,
2366 enum remove_bp_reason reason)
2367 {
2368 /* This is kind of a weird case to handle, but the permission might
2369 have been changed after breakpoints were inserted - in which case
2370 we should just take the user literally and assume that any
2371 breakpoints should be left in place. */
2372 if (!may_insert_breakpoints)
2373 {
2374 warning (_("May not remove breakpoints"));
2375 return 1;
2376 }
2377
2378 target_ops *target = current_inferior ()->top_target ();
2379
2380 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2381 }
2382
2383 static void
2384 info_target_command (const char *args, int from_tty)
2385 {
2386 int has_all_mem = 0;
2387
2388 if (current_program_space->symfile_object_file != NULL)
2389 {
2390 objfile *objf = current_program_space->symfile_object_file;
2391 gdb_printf (_("Symbols from \"%s\".\n"),
2392 objfile_name (objf));
2393 }
2394
2395 for (target_ops *t = current_inferior ()->top_target ();
2396 t != NULL;
2397 t = t->beneath ())
2398 {
2399 if (!t->has_memory ())
2400 continue;
2401
2402 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2403 continue;
2404 if (has_all_mem)
2405 gdb_printf (_("\tWhile running this, "
2406 "GDB does not access memory from...\n"));
2407 gdb_printf ("%s:\n", t->longname ());
2408 t->files_info ();
2409 has_all_mem = t->has_all_memory ();
2410 }
2411 }
2412
2413 /* This function is called before any new inferior is created, e.g.
2414 by running a program, attaching, or connecting to a target.
2415 It cleans up any state from previous invocations which might
2416 change between runs. This is a subset of what target_preopen
2417 resets (things which might change between targets). */
2418
2419 void
2420 target_pre_inferior (int from_tty)
2421 {
2422 /* Clear out solib state. Otherwise the solib state of the previous
2423 inferior might have survived and is entirely wrong for the new
2424 target. This has been observed on GNU/Linux using glibc 2.3. How
2425 to reproduce:
2426
2427 bash$ ./foo&
2428 [1] 4711
2429 bash$ ./foo&
2430 [1] 4712
2431 bash$ gdb ./foo
2432 [...]
2433 (gdb) attach 4711
2434 (gdb) detach
2435 (gdb) attach 4712
2436 Cannot access memory at address 0xdeadbeef
2437 */
2438
2439 /* In some OSs, the shared library list is the same/global/shared
2440 across inferiors. If code is shared between processes, so are
2441 memory regions and features. */
2442 if (!gdbarch_has_global_solist (current_inferior ()->arch ()))
2443 {
2444 no_shared_libraries (NULL, from_tty);
2445
2446 invalidate_target_mem_regions ();
2447
2448 target_clear_description ();
2449 }
2450
2451 /* attach_flag may be set if the previous process associated with
2452 the inferior was attached to. */
2453 current_inferior ()->attach_flag = false;
2454
2455 current_inferior ()->highest_thread_num = 0;
2456
2457 update_previous_thread ();
2458
2459 agent_capability_invalidate ();
2460 }
2461
2462 /* This is to be called by the open routine before it does
2463 anything. */
2464
2465 void
2466 target_preopen (int from_tty)
2467 {
2468 dont_repeat ();
2469
2470 if (current_inferior ()->pid != 0)
2471 {
2472 if (!from_tty
2473 || !target_has_execution ()
2474 || query (_("A program is being debugged already. Kill it? ")))
2475 {
2476 /* Core inferiors actually should be detached, not
2477 killed. */
2478 if (target_has_execution ())
2479 target_kill ();
2480 else
2481 target_detach (current_inferior (), 0);
2482 }
2483 else
2484 error (_("Program not killed."));
2485 }
2486
2487 /* Release reference to old previous thread. */
2488 update_previous_thread ();
2489
2490 /* Calling target_kill may remove the target from the stack. But if
2491 it doesn't (which seems like a win for UDI), remove it now. */
2492 /* Leave the exec target, though. The user may be switching from a
2493 live process to a core of the same program. */
2494 current_inferior ()->pop_all_targets_above (file_stratum);
2495
2496 target_pre_inferior (from_tty);
2497 }
2498
2499 /* See target.h. */
2500
2501 void
2502 target_detach (inferior *inf, int from_tty)
2503 {
2504 /* Threads don't need to be resumed until the end of this function. */
2505 scoped_disable_commit_resumed disable_commit_resumed ("detaching");
2506
2507 /* After we have detached, we will clear the register cache for this inferior
2508 by calling registers_changed_ptid. We must save the pid_ptid before
2509 detaching, as the target detach method will clear inf->pid. */
2510 ptid_t save_pid_ptid = ptid_t (inf->pid);
2511
2512 /* As long as some to_detach implementations rely on the current_inferior
2513 (either directly, or indirectly, like through reading memory), INF needs
2514 to be the current inferior. Once that requirement no longer holds,
2515 this assertion can be removed. */
2516 gdb_assert (inf == current_inferior ());
2517
2518 prepare_for_detach ();
2519
2520 gdb::observers::inferior_pre_detach.notify (inf);
2521
2522 /* Hold a strong reference because detaching may unpush the
2523 target. */
2524 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2525
2526 current_inferior ()->top_target ()->detach (inf, from_tty);
2527
2528 process_stratum_target *proc_target
2529 = as_process_stratum_target (proc_target_ref.get ());
2530
2531 registers_changed_ptid (proc_target, save_pid_ptid);
2532
2533 /* We have to ensure we have no frame cache left. Normally,
2534 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2535 inferior_ptid matches save_pid_ptid, but in our case, it does not
2536 call it, as inferior_ptid has been reset. */
2537 reinit_frame_cache ();
2538
2539 disable_commit_resumed.reset_and_commit ();
2540 }
2541
2542 void
2543 target_disconnect (const char *args, int from_tty)
2544 {
2545 /* If we're in breakpoints-always-inserted mode or if breakpoints
2546 are global across processes, we have to remove them before
2547 disconnecting. */
2548 remove_breakpoints ();
2549
2550 current_inferior ()->top_target ()->disconnect (args, from_tty);
2551 }
2552
2553 /* See target/target.h. */
2554
2555 ptid_t
2556 target_wait (ptid_t ptid, struct target_waitstatus *status,
2557 target_wait_flags options)
2558 {
2559 target_ops *target = current_inferior ()->top_target ();
2560 process_stratum_target *proc_target = current_inferior ()->process_target ();
2561
2562 gdb_assert (!proc_target->commit_resumed_state);
2563
2564 if (!target_can_async_p (target))
2565 gdb_assert ((options & TARGET_WNOHANG) == 0);
2566
2567 try
2568 {
2569 gdb::observers::target_pre_wait.notify (ptid);
2570 ptid_t event_ptid = target->wait (ptid, status, options);
2571 gdb::observers::target_post_wait.notify (event_ptid);
2572 return event_ptid;
2573 }
2574 catch (...)
2575 {
2576 gdb::observers::target_post_wait.notify (null_ptid);
2577 throw;
2578 }
2579 }
2580
2581 /* See target.h. */
2582
2583 ptid_t
2584 default_target_wait (struct target_ops *ops,
2585 ptid_t ptid, struct target_waitstatus *status,
2586 target_wait_flags options)
2587 {
2588 status->set_ignore ();
2589 return minus_one_ptid;
2590 }
2591
2592 std::string
2593 target_pid_to_str (ptid_t ptid)
2594 {
2595 return current_inferior ()->top_target ()->pid_to_str (ptid);
2596 }
2597
2598 const char *
2599 target_thread_name (struct thread_info *info)
2600 {
2601 gdb_assert (info->inf == current_inferior ());
2602
2603 return current_inferior ()->top_target ()->thread_name (info);
2604 }
2605
2606 struct thread_info *
2607 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2608 int handle_len,
2609 struct inferior *inf)
2610 {
2611 target_ops *target = current_inferior ()->top_target ();
2612
2613 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2614 }
2615
2616 /* See target.h. */
2617
2618 gdb::array_view<const gdb_byte>
2619 target_thread_info_to_thread_handle (struct thread_info *tip)
2620 {
2621 target_ops *target = current_inferior ()->top_target ();
2622
2623 return target->thread_info_to_thread_handle (tip);
2624 }
2625
2626 void
2627 target_resume (ptid_t scope_ptid, int step, enum gdb_signal signal)
2628 {
2629 process_stratum_target *curr_target = current_inferior ()->process_target ();
2630 gdb_assert (!curr_target->commit_resumed_state);
2631
2632 gdb_assert (inferior_ptid != null_ptid);
2633 gdb_assert (inferior_ptid.matches (scope_ptid));
2634
2635 target_dcache_invalidate ();
2636
2637 current_inferior ()->top_target ()->resume (scope_ptid, step, signal);
2638
2639 registers_changed_ptid (curr_target, scope_ptid);
2640 /* We only set the internal executing state here. The user/frontend
2641 running state is set at a higher level. This also clears the
2642 thread's stop_pc as a side effect. */
2643 set_executing (curr_target, scope_ptid, true);
2644 clear_inline_frame_state (curr_target, scope_ptid);
2645
2646 if (target_can_async_p ())
2647 target_async (true);
2648 }
2649
2650 /* See target.h. */
2651
2652 void
2653 target_commit_resumed ()
2654 {
2655 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2656 current_inferior ()->top_target ()->commit_resumed ();
2657 }
2658
2659 /* See target.h. */
2660
2661 bool
2662 target_has_pending_events ()
2663 {
2664 return current_inferior ()->top_target ()->has_pending_events ();
2665 }
2666
2667 void
2668 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2669 {
2670 current_inferior ()->top_target ()->pass_signals (pass_signals);
2671 }
2672
2673 void
2674 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2675 {
2676 current_inferior ()->top_target ()->program_signals (program_signals);
2677 }
2678
2679 static void
2680 default_follow_fork (struct target_ops *self, inferior *child_inf,
2681 ptid_t child_ptid, target_waitkind fork_kind,
2682 bool follow_child, bool detach_fork)
2683 {
2684 /* Some target returned a fork event, but did not know how to follow it. */
2685 internal_error (_("could not find a target to follow fork"));
2686 }
2687
2688 /* See target.h. */
2689
2690 void
2691 target_follow_fork (inferior *child_inf, ptid_t child_ptid,
2692 target_waitkind fork_kind, bool follow_child,
2693 bool detach_fork)
2694 {
2695 target_ops *target = current_inferior ()->top_target ();
2696
2697 /* Check consistency between CHILD_INF, CHILD_PTID, FOLLOW_CHILD and
2698 DETACH_FORK. */
2699 if (child_inf != nullptr)
2700 {
2701 gdb_assert (follow_child || !detach_fork);
2702 gdb_assert (child_inf->pid == child_ptid.pid ());
2703 }
2704 else
2705 gdb_assert (!follow_child && detach_fork);
2706
2707 return target->follow_fork (child_inf, child_ptid, fork_kind, follow_child,
2708 detach_fork);
2709 }
2710
2711 /* See target.h. */
2712
2713 void
2714 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2715 const char *execd_pathname)
2716 {
2717 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2718 execd_pathname);
2719 }
2720
2721 static void
2722 default_mourn_inferior (struct target_ops *self)
2723 {
2724 internal_error (_("could not find a target to mourn the inferior"));
2725 }
2726
2727 void
2728 target_mourn_inferior (ptid_t ptid)
2729 {
2730 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2731 current_inferior ()->top_target ()->mourn_inferior ();
2732
2733 /* We no longer need to keep handles on any of the object files.
2734 Make sure to release them to avoid unnecessarily locking any
2735 of them while we're not actually debugging. */
2736 bfd_cache_close_all ();
2737 }
2738
2739 /* Look for a target which can describe architectural features, starting
2740 from TARGET. If we find one, return its description. */
2741
2742 const struct target_desc *
2743 target_read_description (struct target_ops *target)
2744 {
2745 return target->read_description ();
2746 }
2747
2748
2749 /* Default implementation of memory-searching. */
2750
2751 static int
2752 default_search_memory (struct target_ops *self,
2753 CORE_ADDR start_addr, ULONGEST search_space_len,
2754 const gdb_byte *pattern, ULONGEST pattern_len,
2755 CORE_ADDR *found_addrp)
2756 {
2757 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2758 {
2759 return target_read (current_inferior ()->top_target (),
2760 TARGET_OBJECT_MEMORY, NULL,
2761 result, addr, len) == len;
2762 };
2763
2764 /* Start over from the top of the target stack. */
2765 return simple_search_memory (read_memory, start_addr, search_space_len,
2766 pattern, pattern_len, found_addrp);
2767 }
2768
2769 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2770 sequence of bytes in PATTERN with length PATTERN_LEN.
2771
2772 The result is 1 if found, 0 if not found, and -1 if there was an error
2773 requiring halting of the search (e.g. memory read error).
2774 If the pattern is found the address is recorded in FOUND_ADDRP. */
2775
2776 int
2777 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2778 const gdb_byte *pattern, ULONGEST pattern_len,
2779 CORE_ADDR *found_addrp)
2780 {
2781 target_ops *target = current_inferior ()->top_target ();
2782
2783 return target->search_memory (start_addr, search_space_len, pattern,
2784 pattern_len, found_addrp);
2785 }
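
/* Illustrative call (the pattern bytes and bounds are hypothetical):
   look for a two-byte sequence and handle the three documented
   outcomes:

     static const gdb_byte pattern[] = { 0x55, 0x48 };
     CORE_ADDR found;
     int res = target_search_memory (start, space_len, pattern,
                                     sizeof (pattern), &found);
     if (res == 1)
       ... FOUND holds the address of the first match ...
     else if (res == 0)
       ... no match in the search space ...
     else
       ... a memory error stopped the search ...
*/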
2786
2787 /* Look through the currently pushed targets. If none of them will
2788 be able to restart the currently running process, issue an error
2789 message. */
2790
2791 void
2792 target_require_runnable (void)
2793 {
2794 for (target_ops *t = current_inferior ()->top_target ();
2795 t != NULL;
2796 t = t->beneath ())
2797 {
2798 /* If this target knows how to create a new program, then
2799 assume we will still be able to after killing the current
2800 one. Either killing and mourning will not pop T, or else
2801 find_default_run_target will find it again. */
2802 if (t->can_create_inferior ())
2803 return;
2804
2805 /* Do not worry about targets at certain strata that cannot
2806 create inferiors. Assume they will be pushed again if
2807 necessary, and continue to the process_stratum. */
2808 if (t->stratum () > process_stratum)
2809 continue;
2810
2811 error (_("The \"%s\" target does not support \"run\". "
2812 "Try \"help target\" or \"continue\"."),
2813 t->shortname ());
2814 }
2815
2816 /* This function is only called if the target is running. In that
2817 case there should have been a process_stratum target and it
2818 should either know how to create inferiors, or not... */
2819 internal_error (_("No targets found"));
2820 }
2821
2822 /* Whether GDB is allowed to fall back to the default run target for
2823 "run", "attach", etc. when no target is connected yet. */
2824 static bool auto_connect_native_target = true;
2825
2826 static void
2827 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2828 struct cmd_list_element *c, const char *value)
2829 {
2830 gdb_printf (file,
2831 _("Whether GDB may automatically connect to the "
2832 "native target is %s.\n"),
2833 value);
2834 }
2835
2836 /* A pointer to the target that can respond to "run" or "attach".
2837 Native targets are always singletons and instantiated early at GDB
2838 startup. */
2839 static target_ops *the_native_target;
2840
2841 /* See target.h. */
2842
2843 void
2844 set_native_target (target_ops *target)
2845 {
2846 if (the_native_target != NULL)
2847 internal_error (_("native target already set (\"%s\")."),
2848 the_native_target->longname ());
2849
2850 the_native_target = target;
2851 }
2852
2853 /* See target.h. */
2854
2855 target_ops *
2856 get_native_target ()
2857 {
2858 return the_native_target;
2859 }
2860
2861 /* Look through the list of possible targets for a target that can
2862 execute a run or attach command without any other data. This is
2863 used to locate the default process stratum.
2864
2865 If DO_MESG is not NULL, the result is always valid (error() is
2866 called for errors); else, return NULL on error. */
2867
2868 static struct target_ops *
2869 find_default_run_target (const char *do_mesg)
2870 {
2871 if (auto_connect_native_target && the_native_target != NULL)
2872 return the_native_target;
2873
2874 if (do_mesg != NULL)
2875 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2876 return NULL;
2877 }
2878
2879 /* See target.h. */
2880
2881 struct target_ops *
2882 find_attach_target (void)
2883 {
2884 /* If a target on the current stack can attach, use it. */
2885 for (target_ops *t = current_inferior ()->top_target ();
2886 t != NULL;
2887 t = t->beneath ())
2888 {
2889 if (t->can_attach ())
2890 return t;
2891 }
2892
2893 /* Otherwise, use the default run target for attaching. */
2894 return find_default_run_target ("attach");
2895 }
2896
2897 /* See target.h. */
2898
2899 struct target_ops *
2900 find_run_target (void)
2901 {
2902 /* If a target on the current stack can run, use it. */
2903 for (target_ops *t = current_inferior ()->top_target ();
2904 t != NULL;
2905 t = t->beneath ())
2906 {
2907 if (t->can_create_inferior ())
2908 return t;
2909 }
2910
2911 /* Otherwise, use the default run target. */
2912 return find_default_run_target ("run");
2913 }
2914
2915 bool
2916 target_ops::info_proc (const char *args, enum info_proc_what what)
2917 {
2918 return false;
2919 }
2920
2921 /* Implement the "info proc" command. */
2922
2923 int
2924 target_info_proc (const char *args, enum info_proc_what what)
2925 {
2926 struct target_ops *t;
2927
2928 /* If we're already connected to something that can get us OS
2929 related data, use it. Otherwise, try using the native
2930 target. */
2931 t = find_target_at (process_stratum);
2932 if (t == NULL)
2933 t = find_default_run_target (NULL);
2934
2935 for (; t != NULL; t = t->beneath ())
2936 {
2937 if (t->info_proc (args, what))
2938 {
2939 if (targetdebug)
2940 gdb_printf (gdb_stdlog,
2941 "target_info_proc (\"%s\", %d)\n", args, what);
2942
2943 return 1;
2944 }
2945 }
2946
2947 return 0;
2948 }
2949
2950 static int
2951 find_default_supports_disable_randomization (struct target_ops *self)
2952 {
2953 struct target_ops *t;
2954
2955 t = find_default_run_target (NULL);
2956 if (t != NULL)
2957 return t->supports_disable_randomization ();
2958 return 0;
2959 }
2960
2961 int
2962 target_supports_disable_randomization (void)
2963 {
2964 return current_inferior ()->top_target ()->supports_disable_randomization ();
2965 }
2966
2967 /* See target/target.h. */
2968
2969 int
2970 target_supports_multi_process (void)
2971 {
2972 return current_inferior ()->top_target ()->supports_multi_process ();
2973 }
2974
2975 /* See target.h. */
2976
2977 gdb::optional<gdb::char_vector>
2978 target_get_osdata (const char *type)
2979 {
2980 struct target_ops *t;
2981
2982 /* If we're already connected to something that can get us OS
2983 related data, use it. Otherwise, try using the native
2984 target. */
2985 t = find_target_at (process_stratum);
2986 if (t == NULL)
2987 t = find_default_run_target ("get OS data");
2988
2989 if (!t)
2990 return {};
2991
2992 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2993 }
2994
2995 /* Determine the current address space of thread PTID. */
2996
2997 struct address_space *
2998 target_thread_address_space (ptid_t ptid)
2999 {
3000 struct address_space *aspace;
3001
3002 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3003 gdb_assert (aspace != NULL);
3004
3005 return aspace;
3006 }
3007
3008 /* See target.h. */
3009
3010 target_ops *
3011 target_ops::beneath () const
3012 {
3013 return current_inferior ()->find_target_beneath (this);
3014 }
3015
3016 void
3017 target_ops::close ()
3018 {
3019 }
3020
3021 bool
3022 target_ops::can_attach ()
3023 {
3024 return 0;
3025 }
3026
3027 void
3028 target_ops::attach (const char *, int)
3029 {
3030 gdb_assert_not_reached ("target_ops::attach called");
3031 }
3032
3033 bool
3034 target_ops::can_create_inferior ()
3035 {
3036 return 0;
3037 }
3038
3039 void
3040 target_ops::create_inferior (const char *, const std::string &,
3041 char **, int)
3042 {
3043 gdb_assert_not_reached ("target_ops::create_inferior called");
3044 }
3045
3046 bool
3047 target_ops::can_run ()
3048 {
3049 return false;
3050 }
3051
3052 int
3053 target_can_run ()
3054 {
3055 for (target_ops *t = current_inferior ()->top_target ();
3056 t != NULL;
3057 t = t->beneath ())
3058 {
3059 if (t->can_run ())
3060 return 1;
3061 }
3062
3063 return 0;
3064 }
3065
3066 /* Target file operations. */
3067
3068 static struct target_ops *
3069 default_fileio_target (void)
3070 {
3071 struct target_ops *t;
3072
3073 /* If we're already connected to something that can perform
3074 file I/O, use it. Otherwise, try using the native target. */
3075 t = find_target_at (process_stratum);
3076 if (t != NULL)
3077 return t;
3078 return find_default_run_target ("file I/O");
3079 }
3080
3081 /* File handle for target file operations. */
3082
3083 struct fileio_fh_t
3084 {
3085 /* The target on which this file is open. NULL if the target was
3086 closed while the handle was still open. */
3087 target_ops *target;
3088
3089 /* The file descriptor on the target. */
3090 int target_fd;
3091
3092 /* Check whether this fileio_fh_t represents a closed file. */
3093 bool is_closed ()
3094 {
3095 return target_fd < 0;
3096 }
3097 };
3098
3099 /* Vector of currently open file handles. The value returned by
3100 target_fileio_open and passed as the FD argument to other
3101 target_fileio_* functions is an index into this vector. This
3102 vector's entries are never freed; instead, files are marked as
3103 closed, and the handle becomes available for reuse. */
3104 static std::vector<fileio_fh_t> fileio_fhandles;
3105
3106 /* Index into fileio_fhandles of the lowest handle that might be
3107 closed. This permits handle reuse without searching the whole
3108 list each time a new file is opened. */
3109 static int lowest_closed_fd;
3110
3111 /* See target.h. */
3112
3113 void
3114 fileio_handles_invalidate_target (target_ops *targ)
3115 {
3116 for (fileio_fh_t &fh : fileio_fhandles)
3117 if (fh.target == targ)
3118 fh.target = NULL;
3119 }
3120
3121 /* Acquire a target fileio file descriptor. */
3122
3123 static int
3124 acquire_fileio_fd (target_ops *target, int target_fd)
3125 {
3126 /* Search for closed handles to reuse. */
3127 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3128 {
3129 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3130
3131 if (fh.is_closed ())
3132 break;
3133 }
3134
3135 /* Push a new handle if no closed handles were found. */
3136 if (lowest_closed_fd == fileio_fhandles.size ())
3137 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3138 else
3139 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3140
3141 /* Should no longer be marked closed. */
3142 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3143
3144 /* Return its index, and start the next lookup at
3145 the next index. */
3146 return lowest_closed_fd++;
3147 }
3148
3149 /* Release a target fileio file descriptor. */
3150
3151 static void
3152 release_fileio_fd (int fd, fileio_fh_t *fh)
3153 {
3154 fh->target_fd = -1;
3155 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3156 }
3157
3158 /* Return a pointer to the fileio_fh_t corresponding to FD. */
3159
3160 static fileio_fh_t *
3161 fileio_fd_to_fh (int fd)
3162 {
3163 return &fileio_fhandles[fd];
3164 }
3165
3166
3167 /* Default implementations of file i/o methods. We don't want these
3168 to delegate automatically, because we need to know which target
3169 supported the method, in order to call it directly from within
3170 pread/pwrite, etc. */
3171
3172 int
3173 target_ops::fileio_open (struct inferior *inf, const char *filename,
3174 int flags, int mode, int warn_if_slow,
3175 fileio_error *target_errno)
3176 {
3177 *target_errno = FILEIO_ENOSYS;
3178 return -1;
3179 }
3180
3181 int
3182 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3183 ULONGEST offset, fileio_error *target_errno)
3184 {
3185 *target_errno = FILEIO_ENOSYS;
3186 return -1;
3187 }
3188
3189 int
3190 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3191 ULONGEST offset, fileio_error *target_errno)
3192 {
3193 *target_errno = FILEIO_ENOSYS;
3194 return -1;
3195 }
3196
3197 int
3198 target_ops::fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3199 {
3200 *target_errno = FILEIO_ENOSYS;
3201 return -1;
3202 }
3203
3204 int
3205 target_ops::fileio_close (int fd, fileio_error *target_errno)
3206 {
3207 *target_errno = FILEIO_ENOSYS;
3208 return -1;
3209 }
3210
3211 int
3212 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3213 fileio_error *target_errno)
3214 {
3215 *target_errno = FILEIO_ENOSYS;
3216 return -1;
3217 }
3218
3219 gdb::optional<std::string>
3220 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3221 fileio_error *target_errno)
3222 {
3223 *target_errno = FILEIO_ENOSYS;
3224 return {};
3225 }
3226
3227 /* See target.h. */
3228
3229 int
3230 target_fileio_open (struct inferior *inf, const char *filename,
3231 int flags, int mode, bool warn_if_slow, fileio_error *target_errno)
3232 {
3233 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3234 {
3235 int fd = t->fileio_open (inf, filename, flags, mode,
3236 warn_if_slow, target_errno);
3237
3238 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3239 continue;
3240
3241 if (fd < 0)
3242 fd = -1;
3243 else
3244 fd = acquire_fileio_fd (t, fd);
3245
3246 if (targetdebug)
3247 gdb_printf (gdb_stdlog,
3248 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3249 " = %d (%d)\n",
3250 inf == NULL ? 0 : inf->num,
3251 filename, flags, mode,
3252 warn_if_slow, fd,
3253 fd != -1 ? 0 : *target_errno);
3254 return fd;
3255 }
3256
3257 *target_errno = FILEIO_ENOSYS;
3258 return -1;
3259 }
3260
3261 /* See target.h. */
3262
3263 int
3264 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3265 ULONGEST offset, fileio_error *target_errno)
3266 {
3267 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3268 int ret = -1;
3269
3270 if (fh->is_closed ())
3271 *target_errno = FILEIO_EBADF;
3272 else if (fh->target == NULL)
3273 *target_errno = FILEIO_EIO;
3274 else
3275 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3276 len, offset, target_errno);
3277
3278 if (targetdebug)
3279 gdb_printf (gdb_stdlog,
3280 "target_fileio_pwrite (%d,...,%d,%s) "
3281 "= %d (%d)\n",
3282 fd, len, pulongest (offset),
3283 ret, ret != -1 ? 0 : *target_errno);
3284 return ret;
3285 }
3286
3287 /* See target.h. */
3288
3289 int
3290 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3291 ULONGEST offset, fileio_error *target_errno)
3292 {
3293 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3294 int ret = -1;
3295
3296 if (fh->is_closed ())
3297 *target_errno = FILEIO_EBADF;
3298 else if (fh->target == NULL)
3299 *target_errno = FILEIO_EIO;
3300 else
3301 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3302 len, offset, target_errno);
3303
3304 if (targetdebug)
3305 gdb_printf (gdb_stdlog,
3306 "target_fileio_pread (%d,...,%d,%s) "
3307 "= %d (%d)\n",
3308 fd, len, pulongest (offset),
3309 ret, ret != -1 ? 0 : *target_errno);
3310 return ret;
3311 }
3312
3313 /* See target.h. */
3314
3315 int
3316 target_fileio_fstat (int fd, struct stat *sb, fileio_error *target_errno)
3317 {
3318 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3319 int ret = -1;
3320
3321 if (fh->is_closed ())
3322 *target_errno = FILEIO_EBADF;
3323 else if (fh->target == NULL)
3324 *target_errno = FILEIO_EIO;
3325 else
3326 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3327
3328 if (targetdebug)
3329 gdb_printf (gdb_stdlog,
3330 "target_fileio_fstat (%d) = %d (%d)\n",
3331 fd, ret, ret != -1 ? 0 : *target_errno);
3332 return ret;
3333 }
3334
3335 /* See target.h. */
3336
3337 int
3338 target_fileio_close (int fd, fileio_error *target_errno)
3339 {
3340 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3341 int ret = -1;
3342
3343 if (fh->is_closed ())
3344 *target_errno = FILEIO_EBADF;
3345 else
3346 {
3347 if (fh->target != NULL)
3348 ret = fh->target->fileio_close (fh->target_fd,
3349 target_errno);
3350 else
3351 ret = 0;
3352 release_fileio_fd (fd, fh);
3353 }
3354
3355 if (targetdebug)
3356 gdb_printf (gdb_stdlog,
3357 "target_fileio_close (%d) = %d (%d)\n",
3358 fd, ret, ret != -1 ? 0 : *target_errno);
3359 return ret;
3360 }
3361
3362 /* See target.h. */
3363
3364 int
3365 target_fileio_unlink (struct inferior *inf, const char *filename,
3366 fileio_error *target_errno)
3367 {
3368 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3369 {
3370 int ret = t->fileio_unlink (inf, filename, target_errno);
3371
3372 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3373 continue;
3374
3375 if (targetdebug)
3376 gdb_printf (gdb_stdlog,
3377 "target_fileio_unlink (%d,%s)"
3378 " = %d (%d)\n",
3379 inf == NULL ? 0 : inf->num, filename,
3380 ret, ret != -1 ? 0 : *target_errno);
3381 return ret;
3382 }
3383
3384 *target_errno = FILEIO_ENOSYS;
3385 return -1;
3386 }
3387
3388 /* See target.h. */
3389
3390 gdb::optional<std::string>
3391 target_fileio_readlink (struct inferior *inf, const char *filename,
3392 fileio_error *target_errno)
3393 {
3394 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3395 {
3396 gdb::optional<std::string> ret
3397 = t->fileio_readlink (inf, filename, target_errno);
3398
3399 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3400 continue;
3401
3402 if (targetdebug)
3403 gdb_printf (gdb_stdlog,
3404 "target_fileio_readlink (%d,%s)"
3405 " = %s (%d)\n",
3406 inf == NULL ? 0 : inf->num,
3407 filename, ret ? ret->c_str () : "(nil)",
3408 ret ? 0 : *target_errno);
3409 return ret;
3410 }
3411
3412 *target_errno = FILEIO_ENOSYS;
3413 return {};
3414 }
3415
3416 /* Like scoped_fd, but specific to target fileio. */
3417
3418 class scoped_target_fd
3419 {
3420 public:
3421 explicit scoped_target_fd (int fd) noexcept
3422 : m_fd (fd)
3423 {
3424 }
3425
3426 ~scoped_target_fd ()
3427 {
3428 if (m_fd >= 0)
3429 {
3430 fileio_error target_errno;
3431
3432 target_fileio_close (m_fd, &target_errno);
3433 }
3434 }
3435
3436 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3437
3438 int get () const noexcept
3439 {
3440 return m_fd;
3441 }
3442
3443 private:
3444 int m_fd;
3445 };
3446
3447 /* Read target file FILENAME, in the filesystem as seen by INF. If
3448 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3449 remote targets, the remote stub). Store the result in *BUF_P and
3450 return the size of the transferred data. PADDING additional bytes
3451 are available in *BUF_P. This is a helper function for
3452 target_fileio_read_alloc; see the declaration of that function for
3453 more information. */
3454
3455 static LONGEST
3456 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3457 gdb_byte **buf_p, int padding)
3458 {
3459 size_t buf_alloc, buf_pos;
3460 gdb_byte *buf;
3461 LONGEST n;
3462 fileio_error target_errno;
3463
3464 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3465 0700, false, &target_errno));
3466 if (fd.get () == -1)
3467 return -1;
3468
3469 /* Start by reading up to 4K at a time. The target will throttle
3470 this number down if necessary. */
3471 buf_alloc = 4096;
3472 buf = (gdb_byte *) xmalloc (buf_alloc);
3473 buf_pos = 0;
3474 while (1)
3475 {
3476 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3477 buf_alloc - buf_pos - padding, buf_pos,
3478 &target_errno);
3479 if (n < 0)
3480 {
3481 /* An error occurred. */
3482 xfree (buf);
3483 return -1;
3484 }
3485 else if (n == 0)
3486 {
3487 /* Read all there was. */
3488 if (buf_pos == 0)
3489 xfree (buf);
3490 else
3491 *buf_p = buf;
3492 return buf_pos;
3493 }
3494
3495 buf_pos += n;
3496
3497 /* If the buffer is filling up, expand it. */
3498 if (buf_alloc < buf_pos * 2)
3499 {
3500 buf_alloc *= 2;
3501 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3502 }
3503
3504 QUIT;
3505 }
3506 }
3507
3508 /* See target.h. */
3509
3510 LONGEST
3511 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3512 gdb_byte **buf_p)
3513 {
3514 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3515 }
3516
3517 /* See target.h. */
3518
3519 gdb::unique_xmalloc_ptr<char>
3520 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3521 {
3522 gdb_byte *buffer;
3523 char *bufstr;
3524 LONGEST i, transferred;
3525
3526 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3527 bufstr = (char *) buffer;
3528
3529 if (transferred < 0)
3530 return gdb::unique_xmalloc_ptr<char> (nullptr);
3531
3532 if (transferred == 0)
3533 return make_unique_xstrdup ("");
3534
3535 bufstr[transferred] = 0;
3536
3537 /* Check for embedded NUL bytes; but allow trailing NULs. */
3538 for (i = strlen (bufstr); i < transferred; i++)
3539 if (bufstr[i] != 0)
3540 {
3541 warning (_("target file %s "
3542 "contained unexpected null characters"),
3543 filename);
3544 break;
3545 }
3546
3547 return gdb::unique_xmalloc_ptr<char> (bufstr);
3548 }
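
/* Illustrative use (the filename is hypothetical): read a text file
   from the inferior's filesystem as a NUL-terminated string, treating
   a null result as "could not be read":

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (inf, "/etc/hostname");
     if (text != nullptr)
       ... use text.get () ...
*/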
3549
3550
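/* Default implementation of the region_ok_for_hw_watchpoint target
   method: accept a region only if it is no wider than a pointer on the
   current architecture. */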
3551 static int
3552 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3553 CORE_ADDR addr, int len)
3554 {
3555 gdbarch *arch = current_inferior ()->arch ();
3556 return (len <= gdbarch_ptr_bit (arch) / TARGET_CHAR_BIT);
3557 }
3558
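/* Default implementation of the watchpoint_addr_within_range target
   method: a plain half-open interval check. */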
3559 static int
3560 default_watchpoint_addr_within_range (struct target_ops *target,
3561 CORE_ADDR addr,
3562 CORE_ADDR start, int length)
3563 {
3564 return addr >= start && addr < start + length;
3565 }
3566
3567 /* See target.h. */
3568
3569 target_ops *
3570 target_stack::find_beneath (const target_ops *t) const
3571 {
3572 /* Look for a non-empty slot at stratum levels beneath T's. */
3573 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3574 if (m_stack[stratum].get () != NULL)
3575 return m_stack[stratum].get ();
3576
3577 return NULL;
3578 }
3579
3580 /* See target.h. */
3581
3582 struct target_ops *
3583 find_target_at (enum strata stratum)
3584 {
3585 return current_inferior ()->target_at (stratum);
3586 }
3587
3588 \f
3589
3590 /* See target.h. */
3591
3592 void
3593 target_announce_detach (int from_tty)
3594 {
3595 pid_t pid;
3596 const char *exec_file;
3597
3598 if (!from_tty)
3599 return;
3600
3601 pid = inferior_ptid.pid ();
3602 exec_file = get_exec_file (0);
3603 if (exec_file == nullptr)
3604 gdb_printf (_("Detaching from pid %s\n"),
3605 target_pid_to_str (ptid_t (pid)).c_str ());
3606 else
3607 gdb_printf (_("Detaching from program: %s, %s\n"), exec_file,
3608 target_pid_to_str (ptid_t (pid)).c_str ());
3609 }
3610
3611 /* See target.h. */
3612
3613 void
3614 target_announce_attach (int from_tty, int pid)
3615 {
3616 if (!from_tty)
3617 return;
3618
3619 const char *exec_file = get_exec_file (0);
3620
3621 if (exec_file != nullptr)
3622 gdb_printf (_("Attaching to program: %s, %s\n"), exec_file,
3623 target_pid_to_str (ptid_t (pid)).c_str ());
3624 else
3625 gdb_printf (_("Attaching to %s\n"),
3626 target_pid_to_str (ptid_t (pid)).c_str ());
3627 }
3628
3629 /* The inferior process has died. Long live the inferior! */
3630
3631 void
3632 generic_mourn_inferior (void)
3633 {
3634 inferior *inf = current_inferior ();
3635
3636 switch_to_no_thread ();
3637
3638 /* Mark breakpoints uninserted in case something tries to delete a
3639 breakpoint while we delete the inferior's threads (which would
3640 fail, since the inferior is long gone). */
3641 mark_breakpoints_out ();
3642
3643 if (inf->pid != 0)
3644 exit_inferior (inf);
3645
3646 /* Note this wipes step-resume breakpoints, so needs to be done
3647 after exit_inferior, which ends up referencing the step-resume
3648 breakpoints through clear_thread_inferior_resources. */
3649 breakpoint_init_inferior (inf_exited);
3650
3651 registers_changed ();
3652
3653 reopen_exec_file ();
3654 reinit_frame_cache ();
3655
3656 if (deprecated_detach_hook)
3657 deprecated_detach_hook ();
3658 }
3659 \f
3660 /* Convert a normal process ID to a string. */
3662
3663 std::string
3664 normal_pid_to_str (ptid_t ptid)
3665 {
3666 return string_printf ("process %d", ptid.pid ());
3667 }
3668
3669 static std::string
3670 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3671 {
3672 return normal_pid_to_str (ptid);
3673 }
3674
3675 /* Error-catcher for target_find_memory_regions. */
3676 static int
3677 dummy_find_memory_regions (struct target_ops *self,
3678 find_memory_region_ftype ignore1, void *ignore2)
3679 {
3680 error (_("Command not implemented for this target."));
3681 return 0;
3682 }
3683
3684 /* Error-catcher for target_make_corefile_notes. */
3685 static gdb::unique_xmalloc_ptr<char>
3686 dummy_make_corefile_notes (struct target_ops *self,
3687 bfd *ignore1, int *ignore2)
3688 {
3689 error (_("Command not implemented for this target."));
3690 return NULL;
3691 }
3692
3693 #include "target-delegates.c"
3694
3695 /* The initial current target, so that there is always a semi-valid
3696 current target. */
3697
3698 static dummy_target the_dummy_target;
3699
3700 /* See target.h. */
3701
3702 target_ops *
3703 get_dummy_target ()
3704 {
3705 return &the_dummy_target;
3706 }
3707
3708 static const target_info dummy_target_info = {
3709 "None",
3710 N_("None"),
3711 ""
3712 };
3713
3714 strata
3715 dummy_target::stratum () const
3716 {
3717 return dummy_stratum;
3718 }
3719
3720 strata
3721 debug_target::stratum () const
3722 {
3723 return debug_stratum;
3724 }
3725
3726 const target_info &
3727 dummy_target::info () const
3728 {
3729 return dummy_target_info;
3730 }
3731
3732 const target_info &
3733 debug_target::info () const
3734 {
3735 return beneath ()->info ();
3736 }
3737
3738 \f
3739
3740 int
3741 target_thread_alive (ptid_t ptid)
3742 {
3743 return current_inferior ()->top_target ()->thread_alive (ptid);
3744 }
3745
3746 void
3747 target_update_thread_list (void)
3748 {
3749 current_inferior ()->top_target ()->update_thread_list ();
3750 }
3751
3752 void
3753 target_stop (ptid_t ptid)
3754 {
3755 process_stratum_target *proc_target = current_inferior ()->process_target ();
3756
3757 gdb_assert (!proc_target->commit_resumed_state);
3758
3759 if (!may_stop)
3760 {
3761 warning (_("May not interrupt or stop the target, ignoring attempt"));
3762 return;
3763 }
3764
3765 current_inferior ()->top_target ()->stop (ptid);
3766 }
3767
3768 void
3769 target_interrupt ()
3770 {
3771 if (!may_stop)
3772 {
3773 warning (_("May not interrupt or stop the target, ignoring attempt"));
3774 return;
3775 }
3776
3777 current_inferior ()->top_target ()->interrupt ();
3778 }
3779
3780 /* See target.h. */
3781
3782 void
3783 target_pass_ctrlc (void)
3784 {
3785 /* Pass the Ctrl-C to the first target that has a thread
3786 running. */
3787 for (inferior *inf : all_inferiors ())
3788 {
3789 target_ops *proc_target = inf->process_target ();
3790 if (proc_target == NULL)
3791 continue;
3792
3793 for (thread_info *thr : inf->non_exited_threads ())
3794 {
3795 /* A thread can be THREAD_STOPPED and executing, while
3796 running an infcall. */
3797 if (thr->state == THREAD_RUNNING || thr->executing ())
3798 {
3799 /* We can get here quite deep in target layers. Avoid
3800 switching thread context or anything that would
3801 communicate with the target (e.g., to fetch
3802 registers), or flushing e.g., the frame cache. We
3803 just switch inferior in order to be able to call
3804 through the target_stack. */
3805 scoped_restore_current_inferior restore_inferior;
3806 set_current_inferior (inf);
3807 current_inferior ()->top_target ()->pass_ctrlc ();
3808 return;
3809 }
3810 }
3811 }
3812 }
3813
3814 /* See target.h. */
3815
3816 void
3817 default_target_pass_ctrlc (struct target_ops *ops)
3818 {
3819 target_interrupt ();
3820 }
3821
3822 /* See target/target.h. */
3823
3824 void
3825 target_stop_and_wait (ptid_t ptid)
3826 {
3827 struct target_waitstatus status;
3828 bool was_non_stop = non_stop;
3829
3830 non_stop = true;
3831 target_stop (ptid);
3832
3833 target_wait (ptid, &status, 0);
3834
3835 non_stop = was_non_stop;
3836 }
3837
3838 /* See target/target.h. */
3839
3840 void
3841 target_continue_no_signal (ptid_t ptid)
3842 {
3843 target_resume (ptid, 0, GDB_SIGNAL_0);
3844 }
3845
3846 /* See target/target.h. */
3847
3848 void
3849 target_continue (ptid_t ptid, enum gdb_signal signal)
3850 {
3851 target_resume (ptid, 0, signal);
3852 }
3853
3854 /* Concatenate ELEM to LIST, a comma-separated list. */
3855
3856 static void
3857 str_comma_list_concat_elem (std::string *list, const char *elem)
3858 {
3859 if (!list->empty ())
3860 list->append (", ");
3861
3862 list->append (elem);
3863 }
3864
3865 /* Helper for target_options_to_string. If OPT is present in
3866 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3867 OPT is removed from TARGET_OPTIONS. */
3868
3869 static void
3870 do_option (target_wait_flags *target_options, std::string *ret,
3871 target_wait_flag opt, const char *opt_str)
3872 {
3873 if ((*target_options & opt) != 0)
3874 {
3875 str_comma_list_concat_elem (ret, opt_str);
3876 *target_options &= ~opt;
3877 }
3878 }
3879
3880 /* See target.h. */
3881
3882 std::string
3883 target_options_to_string (target_wait_flags target_options)
3884 {
3885 std::string ret;
3886
3887 #define DO_TARG_OPTION(OPT) \
3888 do_option (&target_options, &ret, OPT, #OPT)
3889
3890 DO_TARG_OPTION (TARGET_WNOHANG);
3891
3892 if (target_options != 0)
3893 str_comma_list_concat_elem (&ret, "unknown???");
3894
3895 return ret;
3896 }
3897
3898 void
3899 target_fetch_registers (struct regcache *regcache, int regno)
3900 {
3901 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3902 if (targetdebug)
3903 regcache->debug_print_register ("target_fetch_registers", regno);
3904 }
3905
3906 void
3907 target_store_registers (struct regcache *regcache, int regno)
3908 {
3909 if (!may_write_registers)
3910 error (_("Writing to registers is not allowed (regno %d)"), regno);
3911
3912 current_inferior ()->top_target ()->store_registers (regcache, regno);
3913 if (targetdebug)
3914 {
3915 regcache->debug_print_register ("target_store_registers", regno);
3916 }
3917 }
3918
3919 int
3920 target_core_of_thread (ptid_t ptid)
3921 {
3922 return current_inferior ()->top_target ()->core_of_thread (ptid);
3923 }
3924
3925 int
3926 simple_verify_memory (struct target_ops *ops,
3927 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3928 {
3929 LONGEST total_xfered = 0;
3930
3931 while (total_xfered < size)
3932 {
3933 ULONGEST xfered_len;
3934 enum target_xfer_status status;
3935 gdb_byte buf[1024];
3936 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3937
3938 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3939 buf, NULL, lma + total_xfered, howmuch,
3940 &xfered_len);
3941 if (status == TARGET_XFER_OK
3942 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3943 {
3944 total_xfered += xfered_len;
3945 QUIT;
3946 }
3947 else
3948 return 0;
3949 }
3950 return 1;
3951 }
3952
3953 /* Default implementation of memory verification. */
3954
3955 static int
3956 default_verify_memory (struct target_ops *self,
3957 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3958 {
3959 /* Start over from the top of the target stack. */
3960 return simple_verify_memory (current_inferior ()->top_target (),
3961 data, memaddr, size);
3962 }
3963
3964 int
3965 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3966 {
3967 target_ops *target = current_inferior ()->top_target ();
3968
3969 return target->verify_memory (data, memaddr, size);
3970 }
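
/* Illustrative use (a sketch; SECTION_DATA, LMA and SIZE are
   hypothetical): check whether previously loaded bytes still match
   target memory:

     if (target_verify_memory (section_data, lma, size) != 1)
       warning (_("section contents differ from target memory"));
*/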
3971
3972 /* The documentation for this function is in its prototype declaration in
3973 target.h. */
3974
3975 int
3976 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3977 enum target_hw_bp_type rw)
3978 {
3979 target_ops *target = current_inferior ()->top_target ();
3980
3981 return target->insert_mask_watchpoint (addr, mask, rw);
3982 }
3983
3984 /* The documentation for this function is in its prototype declaration in
3985 target.h. */
3986
3987 int
3988 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3989 enum target_hw_bp_type rw)
3990 {
3991 target_ops *target = current_inferior ()->top_target ();
3992
3993 return target->remove_mask_watchpoint (addr, mask, rw);
3994 }
3995
3996 /* The documentation for this function is in its prototype declaration
3997 in target.h. */
3998
3999 int
4000 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4001 {
4002 target_ops *target = current_inferior ()->top_target ();
4003
4004 return target->masked_watch_num_registers (addr, mask);
4005 }
4006
4007 /* The documentation for this function is in its prototype declaration
4008 in target.h. */
4009
4010 int
4011 target_ranged_break_num_registers (void)
4012 {
4013 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4014 }
4015
4016 /* See target.h. */
4017
4018 struct btrace_target_info *
4019 target_enable_btrace (thread_info *tp, const struct btrace_config *conf)
4020 {
4021 return current_inferior ()->top_target ()->enable_btrace (tp, conf);
4022 }
4023
4024 /* See target.h. */
4025
4026 void
4027 target_disable_btrace (struct btrace_target_info *btinfo)
4028 {
4029 current_inferior ()->top_target ()->disable_btrace (btinfo);
4030 }
4031
4032 /* See target.h. */
4033
4034 void
4035 target_teardown_btrace (struct btrace_target_info *btinfo)
4036 {
4037 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4038 }
4039
4040 /* See target.h. */
4041
4042 enum btrace_error
4043 target_read_btrace (struct btrace_data *btrace,
4044 struct btrace_target_info *btinfo,
4045 enum btrace_read_type type)
4046 {
4047 target_ops *target = current_inferior ()->top_target ();
4048
4049 return target->read_btrace (btrace, btinfo, type);
4050 }
4051
4052 /* See target.h. */
4053
4054 const struct btrace_config *
4055 target_btrace_conf (const struct btrace_target_info *btinfo)
4056 {
4057 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4058 }
4059
4060 /* See target.h. */
4061
4062 void
4063 target_stop_recording (void)
4064 {
4065 current_inferior ()->top_target ()->stop_recording ();
4066 }
4067
4068 /* See target.h. */
4069
4070 void
4071 target_save_record (const char *filename)
4072 {
4073 current_inferior ()->top_target ()->save_record (filename);
4074 }
4075
4076 /* See target.h. */
4077
4078 int
4079 target_supports_delete_record ()
4080 {
4081 return current_inferior ()->top_target ()->supports_delete_record ();
4082 }
4083
4084 /* See target.h. */
4085
4086 void
4087 target_delete_record (void)
4088 {
4089 current_inferior ()->top_target ()->delete_record ();
4090 }
4091
4092 /* See target.h. */
4093
4094 enum record_method
4095 target_record_method (ptid_t ptid)
4096 {
4097 return current_inferior ()->top_target ()->record_method (ptid);
4098 }
4099
4100 /* See target.h. */
4101
4102 int
4103 target_record_is_replaying (ptid_t ptid)
4104 {
4105 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4106 }
4107
4108 /* See target.h. */
4109
4110 int
4111 target_record_will_replay (ptid_t ptid, int dir)
4112 {
4113 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4114 }
4115
4116 /* See target.h. */
4117
4118 void
4119 target_record_stop_replaying (void)
4120 {
4121 current_inferior ()->top_target ()->record_stop_replaying ();
4122 }
4123
4124 /* See target.h. */
4125
4126 void
4127 target_goto_record_begin (void)
4128 {
4129 current_inferior ()->top_target ()->goto_record_begin ();
4130 }
4131
4132 /* See target.h. */
4133
4134 void
4135 target_goto_record_end (void)
4136 {
4137 current_inferior ()->top_target ()->goto_record_end ();
4138 }
4139
4140 /* See target.h. */
4141
4142 void
4143 target_goto_record (ULONGEST insn)
4144 {
4145 current_inferior ()->top_target ()->goto_record (insn);
4146 }
4147
4148 /* See target.h. */
4149
4150 void
4151 target_insn_history (int size, gdb_disassembly_flags flags)
4152 {
4153 current_inferior ()->top_target ()->insn_history (size, flags);
4154 }
4155
4156 /* See target.h. */
4157
4158 void
4159 target_insn_history_from (ULONGEST from, int size,
4160 gdb_disassembly_flags flags)
4161 {
4162 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4163 }
4164
4165 /* See target.h. */
4166
4167 void
4168 target_insn_history_range (ULONGEST begin, ULONGEST end,
4169 gdb_disassembly_flags flags)
4170 {
4171 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4172 }
4173
4174 /* See target.h. */
4175
4176 void
4177 target_call_history (int size, record_print_flags flags)
4178 {
4179 current_inferior ()->top_target ()->call_history (size, flags);
4180 }
4181
4182 /* See target.h. */
4183
4184 void
4185 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4186 {
4187 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4188 }
4189
4190 /* See target.h. */
4191
4192 void
4193 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4194 {
4195 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4196 }
4197
4198 /* See target.h. */
4199
4200 const struct frame_unwind *
4201 target_get_unwinder (void)
4202 {
4203 return current_inferior ()->top_target ()->get_unwinder ();
4204 }
4205
4206 /* See target.h. */
4207
4208 const struct frame_unwind *
4209 target_get_tailcall_unwinder (void)
4210 {
4211 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4212 }
4213
4214 /* See target.h. */
4215
4216 void
4217 target_prepare_to_generate_core (void)
4218 {
4219 current_inferior ()->top_target ()->prepare_to_generate_core ();
4220 }
4221
4222 /* See target.h. */
4223
4224 void
4225 target_done_generating_core (void)
4226 {
4227 current_inferior ()->top_target ()->done_generating_core ();
4228 }
4229
4230 \f
4231
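/* Help text shared by the "info target" and "info files" commands
   registered in _initialize_target below.  */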
4232 static char targ_desc[] =
4233 "Names of targets and files being debugged.\nShows the entire \
4234 stack of targets currently in use (including the exec-file,\n\
4235 core-file, and process, if any), as well as the symbol file name.";
4236
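/* Default implementation of the target "rcmd" method: report that
   this target does not support "monitor" commands.  */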
4237 static void
4238 default_rcmd (struct target_ops *self, const char *command,
4239 struct ui_file *output)
4240 {
4241 error (_("\"monitor\" command not supported by this target."));
4242 }
4243
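/* Implement the "monitor" command: forward CMD to the current target
   via target_rcmd, sending output to gdb_stdtarg.  */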
4244 static void
4245 do_monitor_command (const char *cmd, int from_tty)
4246 {
4247 target_rcmd (cmd, gdb_stdtarg);
4248 }
4249
4250 /* Implement the "flash-erase" command: erase all memory regions marked
4251    as flash.  CMD and FROM_TTY are ignored. */
4252
4253 void
4254 flash_erase_command (const char *cmd, int from_tty)
4255 {
4256 /* Set if we erase any flash region, so we know to finalize flash operations. */
4257 bool found_flash_region = false;
4258 gdbarch *gdbarch = current_inferior ()->arch ();
4259
4260 std::vector<mem_region> mem_regions = target_memory_map ();
4261
4262 /* Iterate over all memory regions. */
4263 for (const mem_region &m : mem_regions)
4264 {
4265 /* Is this a flash memory region? */
4266 if (m.attrib.mode == MEM_FLASH)
4267 {
4268 found_flash_region = true;
4269 target_flash_erase (m.lo, m.hi - m.lo);
4270
4271 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4272
4273 current_uiout->message (_("Erasing flash memory region at address "));
4274 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4275 current_uiout->message (", size = ");
4276 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4277 current_uiout->message ("\n");
4278 }
4279 }
4280
4281 /* Did we do any flash operations? If so, we need to finalize them. */
4282 if (found_flash_region)
4283 target_flash_done ();
4284 else
4285 current_uiout->message (_("No flash memory regions found.\n"));
4286 }
4287
4288 /* Print the name of each layer of our target stack. */
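/* The output looks along these lines for a live native debugging
   session (the short and long names come from whatever targets happen
   to be pushed, so this is only illustrative):

     The current target stack is:
       - native (Native process)
       - exec (Local exec file)
       - None (None)  */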
4289
4290 static void
4291 maintenance_print_target_stack (const char *cmd, int from_tty)
4292 {
4293 gdb_printf (_("The current target stack is:\n"));
4294
4295 for (target_ops *t = current_inferior ()->top_target ();
4296 t != NULL;
4297 t = t->beneath ())
4298 {
4299 if (t->stratum () == debug_stratum)
4300 continue;
4301 gdb_printf (" - %s (%s)\n", t->shortname (), t->longname ());
4302 }
4303 }
4304
4305 /* See target.h. */
4306
4307 void
4308 target_async (bool enable)
4309 {
4310 /* If we are trying to enable async mode then it must be the case that
4311 async mode is possible for this target. */
4312 gdb_assert (!enable || target_can_async_p ());
4313 infrun_async (enable);
4314 current_inferior ()->top_target ()->async (enable);
4315 }
4316
4317 /* See target.h. */
4318
4319 void
4320 target_thread_events (int enable)
4321 {
4322 current_inferior ()->top_target ()->thread_events (enable);
4323 }
4324
4325 /* Controls whether targets may report that they can be, and are, async.
4326    This is just for maintainers to use when debugging gdb. */
4327 bool target_async_permitted = true;
4328
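/* Set callback for maint target-async setting.  */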
4329 static void
4330 set_maint_target_async (bool permitted)
4331 {
4332 if (have_live_inferiors ())
4333 error (_("Cannot change this setting while the inferior is running."));
4334
4335 target_async_permitted = permitted;
4336 }
4337
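/* Get callback for maint target-async setting.  */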
4338 static bool
4339 get_maint_target_async ()
4340 {
4341 return target_async_permitted;
4342 }
4343
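/* Show callback for maint target-async setting.  */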
4344 static void
4345 show_maint_target_async (ui_file *file, int from_tty,
4346 cmd_list_element *c, const char *value)
4347 {
4348 gdb_printf (file,
4349 _("Controlling the inferior in "
4350 "asynchronous mode is %s.\n"), value);
4351 }
4352
4353 /* Return true if the target operates in non-stop mode even with "set
4354 non-stop off". */
4355
4356 static int
4357 target_always_non_stop_p (void)
4358 {
4359 return current_inferior ()->top_target ()->always_non_stop_p ();
4360 }
4361
4362 /* See target.h. */
4363
4364 bool
4365 target_is_non_stop_p ()
4366 {
4367 return ((non_stop
4368 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4369 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4370 && target_always_non_stop_p ()))
4371 && target_can_async_p ());
4372 }
4373
4374 /* See target.h. */
4375
4376 bool
4377 exists_non_stop_target ()
4378 {
4379 if (target_is_non_stop_p ())
4380 return true;
4381
4382 scoped_restore_current_thread restore_thread;
4383
4384 for (inferior *inf : all_inferiors ())
4385 {
4386 switch_to_inferior_no_thread (inf);
4387 if (target_is_non_stop_p ())
4388 return true;
4389 }
4390
4391 return false;
4392 }
4393
4394 /* Controls whether targets may report that they always run in non-stop
4395    mode.  This is just for maintainers to use when debugging gdb. */
4396 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4397
4398 /* Set callback for maint target-non-stop setting. */
4399
4400 static void
4401 set_maint_target_non_stop (auto_boolean enabled)
4402 {
4403 if (have_live_inferiors ())
4404 error (_("Cannot change this setting while the inferior is running."));
4405
4406 target_non_stop_enabled = enabled;
4407 }
4408
4409 /* Get callback for maint target-non-stop setting. */
4410
4411 static auto_boolean
4412 get_maint_target_non_stop ()
4413 {
4414 return target_non_stop_enabled;
4415 }
4416
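/* Show callback for maint target-non-stop setting.  */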
4417 static void
4418 show_maint_target_non_stop (ui_file *file, int from_tty,
4419 cmd_list_element *c, const char *value)
4420 {
4421 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4422 gdb_printf (file,
4423 _("Whether the target is always in non-stop mode "
4424 "is %s (currently %s).\n"), value,
4425 target_always_non_stop_p () ? "on" : "off");
4426 else
4427 gdb_printf (file,
4428 _("Whether the target is always in non-stop mode "
4429 "is %s.\n"), value);
4430 }
4431
4432 /* Temporary copies of permission settings. */
4433
4434 static bool may_write_registers_1 = true;
4435 static bool may_write_memory_1 = true;
4436 static bool may_insert_breakpoints_1 = true;
4437 static bool may_insert_tracepoints_1 = true;
4438 static bool may_insert_fast_tracepoints_1 = true;
4439 static bool may_stop_1 = true;
4440
4441 /* Make the user-set values match the real values again. */
4442
4443 void
4444 update_target_permissions (void)
4445 {
4446 may_write_registers_1 = may_write_registers;
4447 may_write_memory_1 = may_write_memory;
4448 may_insert_breakpoints_1 = may_insert_breakpoints;
4449 may_insert_tracepoints_1 = may_insert_tracepoints;
4450 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4451 may_stop_1 = may_stop;
4452 }
4453
4454 /* This single function handles (most of) the permission flags in the
4455    same way. */
4456
4457 static void
4458 set_target_permissions (const char *args, int from_tty,
4459 struct cmd_list_element *c)
4460 {
4461 if (target_has_execution ())
4462 {
4463 update_target_permissions ();
4464 error (_("Cannot change this setting while the inferior is running."));
4465 }
4466
4467 /* Make the real values match the user-changed values. */
4468 may_insert_breakpoints = may_insert_breakpoints_1;
4469 may_insert_tracepoints = may_insert_tracepoints_1;
4470 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4471 may_stop = may_stop_1;
4472 update_observer_mode ();
4473 }
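/* For example, "set may-insert-breakpoints off" first stores the new
   value in may_insert_breakpoints_1 and then runs the callback above,
   which rejects the change if the inferior is running and otherwise
   copies the value into may_insert_breakpoints and refreshes observer
   mode.  */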
4474
4475 /* Set some permissions independently of observer mode. */
4476
4477 static void
4478 set_write_memory_registers_permission (const char *args, int from_tty,
4479 struct cmd_list_element *c)
4480 {
4481 /* Make the real values match the user-changed values. */
4482 may_write_memory = may_write_memory_1;
4483 may_write_registers = may_write_registers_1;
4484 update_observer_mode ();
4485 }
4486
4487 void _initialize_target ();
4488
4489 void
4490 _initialize_target ()
4491 {
4492 the_debug_target = new debug_target ();
4493
4494 add_info ("target", info_target_command, targ_desc);
4495 add_info ("files", info_target_command, targ_desc);
4496
4497 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4498 Set target debugging."), _("\
4499 Show target debugging."), _("\
4500 When non-zero, target debugging is enabled. Higher numbers are more\n\
4501 verbose."),
4502 set_targetdebug,
4503 show_targetdebug,
4504 &setdebuglist, &showdebuglist);
4505
4506 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4507 &trust_readonly, _("\
4508 Set mode for reading from readonly sections."), _("\
4509 Show mode for reading from readonly sections."), _("\
4510 When this mode is on, memory reads from readonly sections (such as .text)\n\
4511 will be read from the object file instead of from the target. This will\n\
4512 result in significant performance improvement for remote targets."),
4513 NULL,
4514 show_trust_readonly,
4515 &setlist, &showlist);
4516
4517 add_com ("monitor", class_obscure, do_monitor_command,
4518 _("Send a command to the remote monitor (remote targets only)."));
4519
4520 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4521 _("Print the name of each layer of the internal target stack."),
4522 &maintenanceprintlist);
4523
4524 add_setshow_boolean_cmd ("target-async", no_class,
4525 _("\
4526 Set whether gdb controls the inferior in asynchronous mode."), _("\
4527 Show whether gdb controls the inferior in asynchronous mode."), _("\
4528 Tells gdb whether to control the inferior in asynchronous mode."),
4529 set_maint_target_async,
4530 get_maint_target_async,
4531 show_maint_target_async,
4532 &maintenance_set_cmdlist,
4533 &maintenance_show_cmdlist);
4534
4535 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4536 _("\
4537 Set whether gdb always controls the inferior in non-stop mode."), _("\
4538 Show whether gdb always controls the inferior in non-stop mode."), _("\
4539 Tells gdb whether to control the inferior in non-stop mode."),
4540 set_maint_target_non_stop,
4541 get_maint_target_non_stop,
4542 show_maint_target_non_stop,
4543 &maintenance_set_cmdlist,
4544 &maintenance_show_cmdlist);
4545
4546 add_setshow_boolean_cmd ("may-write-registers", class_support,
4547 &may_write_registers_1, _("\
4548 Set permission to write into registers."), _("\
4549 Show permission to write into registers."), _("\
4550 When this permission is on, GDB may write into the target's registers.\n\
4551 Otherwise, any sort of write attempt will result in an error."),
4552 set_write_memory_registers_permission, NULL,
4553 &setlist, &showlist);
4554
4555 add_setshow_boolean_cmd ("may-write-memory", class_support,
4556 &may_write_memory_1, _("\
4557 Set permission to write into target memory."), _("\
4558 Show permission to write into target memory."), _("\
4559 When this permission is on, GDB may write into the target's memory.\n\
4560 Otherwise, any sort of write attempt will result in an error."),
4561 set_write_memory_registers_permission, NULL,
4562 &setlist, &showlist);
4563
4564 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4565 &may_insert_breakpoints_1, _("\
4566 Set permission to insert breakpoints in the target."), _("\
4567 Show permission to insert breakpoints in the target."), _("\
4568 When this permission is on, GDB may insert breakpoints in the program.\n\
4569 Otherwise, any sort of insertion attempt will result in an error."),
4570 set_target_permissions, NULL,
4571 &setlist, &showlist);
4572
4573 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4574 &may_insert_tracepoints_1, _("\
4575 Set permission to insert tracepoints in the target."), _("\
4576 Show permission to insert tracepoints in the target."), _("\
4577 When this permission is on, GDB may insert tracepoints in the program.\n\
4578 Otherwise, any sort of insertion attempt will result in an error."),
4579 set_target_permissions, NULL,
4580 &setlist, &showlist);
4581
4582 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4583 &may_insert_fast_tracepoints_1, _("\
4584 Set permission to insert fast tracepoints in the target."), _("\
4585 Show permission to insert fast tracepoints in the target."), _("\
4586 When this permission is on, GDB may insert fast tracepoints.\n\
4587 Otherwise, any sort of insertion attempt will result in an error."),
4588 set_target_permissions, NULL,
4589 &setlist, &showlist);
4590
4591 add_setshow_boolean_cmd ("may-interrupt", class_support,
4592 &may_stop_1, _("\
4593 Set permission to interrupt or signal the target."), _("\
4594 Show permission to interrupt or signal the target."), _("\
4595 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4596 Otherwise, any attempt to interrupt or stop will be ignored."),
4597 set_target_permissions, NULL,
4598 &setlist, &showlist);
4599
4600 add_com ("flash-erase", no_class, flash_erase_command,
4601 _("Erase all flash memory regions."));
4602
4603 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4604 &auto_connect_native_target, _("\
4605 Set whether GDB may automatically connect to the native target."), _("\
4606 Show whether GDB may automatically connect to the native target."), _("\
4607 When on, and GDB is not connected to a target yet, GDB\n\
4608 attempts \"run\" and other commands with the native target."),
4609 NULL, show_auto_connect_native_target,
4610 &setlist, &showlist);
4611 }