/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "language.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2/frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
#include <unordered_map>

/* For inferior_ptid and current_inferior ().  */
#include "inferior.h"
/* For std::sqrt and std::pow.  */
#include <cmath>

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map;

/* The standard register names, and all the valid aliases for them.
   We're not adding fp here, that name is already taken, see
   _initialize_frame_reg.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* Link register alias for x30.  */
  {"lr", AARCH64_LR_REGNUM},
  /* SP is the canonical name for x31 according to aarch64_r_register_names,
     so we're adding an x31 alias for sp.  */
  {"x31", AARCH64_SP_REGNUM},
  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
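
/* Note: these aliases are registered with GDB's user-register machinery at
   architecture initialization time, so that e.g. "print $lr" resolves to x30
   and "print $ip0" to x16.  */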

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
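
/* Note: "vg" above is the vector granule: it holds the current SVE vector
   length in units of 64 bits, so e.g. a value of 4 corresponds to 256-bit
   Z registers.  */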

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer, low half/user pointers.  */
  "pauth_dmask",
  /* Authentication mask for code pointer, low half/user pointers.  */
  "pauth_cmask",
  /* Authentication mask for data pointer, high half / kernel pointers.  */
  "pauth_dmask_high",
  /* Authentication mask for code pointer, high half / kernel pointers.  */
  "pauth_cmask_high"
};

static const char *const aarch64_mte_register_names[] =
{
  /* Tag Control Register.  */
  "tag_ctl"
};

static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR);

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};

/* Holds information used to read/write from/to ZA
   pseudo-registers.

   With this information, the read/write code can be simplified so it
   deals only with the information required to map a ZA pseudo-register
   to the exact bytes in the ZA contents buffer.  Otherwise we'd need
   to use a lot of conditionals.  */

struct za_offsets
{
  /* Offset, into ZA, of the starting byte of the pseudo-register.  */
  size_t starting_offset;
  /* The size of the contiguous chunks of the pseudo-register.  */
  size_t chunk_size;
  /* The number of pseudo-register chunks contained in ZA.  */
  size_t chunks;
  /* The offset between each contiguous chunk.  */
  size_t stride_size;
};

/* Holds data that is helpful to determine the individual fields that make
   up the names of the ZA pseudo-registers.  It is also very helpful to
   determine offsets, stride and sizes for reading ZA tiles and tile
   slices.  */

struct za_pseudo_encoding
{
  /* The slice index (0 ~ svl).  Only used for tile slices.  */
  uint8_t slice_index;
  /* The tile number (0 ~ 15).  */
  uint8_t tile_index;
  /* Direction (horizontal/vertical).  Only used for tile slices.  */
  bool horizontal;
  /* Qualifier index (0 ~ 4).  These map to B, H, S, D and Q.  */
  uint8_t qualifier_index;
};
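
/* An illustrative example (not necessarily the exact register name GDB
   prints): a horizontal, S-qualified slice 2 of tile 1 would be encoded as
   slice_index == 2, tile_index == 1, horizontal == true and
   qualifier_index == 2 (B=0, H=1, S=2, D=3, Q=4).  */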

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
			 frame_info_ptr this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->ra_sign_state_regnum))
    {
      /* VA range select (bit 55) tells us whether to use the low half masks
	 or the high half masks.  */
      int cmask_num;
      if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK)
	cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
      else
	cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);

      /* By default, we assume TBI and discard the top 8 bits plus the VA
	 range select bit (55).  */
      CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
      mask |= frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = aarch64_remove_top_bits (addr, mask);

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info_ptr frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Whether the stack has been set.  This should be true when we notice a SP
     to FP move or if we are using the SP as the base register for storing
     data, in case the FP is omitted.  */
  bool seen_stack_set = false;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }

	  /* Did we move SP to FP?  */
	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == addsub_ext
	       && strcmp ("sub", inst.opcode->name) == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT);

	  regs[rd] = pv_subtract (regs[rn], regs[rm]);
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  unsigned rd = inst.operands[0].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF);
	  gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL);

	  /* If this shows up before we set the stack, keep going.  Otherwise
	     stop the analysis.  */
	  if (seen_stack_set)
	    break;

	  regs[rd] = pv_constant (inst.operands[1].imm.value
				  << inst.operands[1].shifter.amount);
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up "
				    "addr=%s opcode=0x%x (orr x register)",
				    core_addr_to_string_nz (start), insn);

	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Ignore the instruction that allocates stack space and sets
	     the SP.  */
	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
	    seen_stack_set = true;
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	  /* Are we storing with SP as a base?  */
	  if (rn == AARCH64_SP_REGNUM)
	    seen_stack_set = true;
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  aarch64_gdbarch_tdep *tdep
	    = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else if (IS_BTI (insn))
	    /* We don't need to do anything special for a BTI instruction.  */
	    continue;
	  else
	    {
	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
				    " opcode=0x%x (iclass)",
				    core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    {
	      int regnum = tdep->ra_sign_state_regnum;
	      cache->saved_regs[regnum].set_value (ra_state_val);
	    }
	}
      else
	{
	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
				" opcode=0x%x",
				core_addr_to_string_nz (start), insn);

	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].set_addr (offset);
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      if (tdep->has_pauth ())
	{
	  int regnum = tdep->ra_sign_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
      0x910003fd, /* mov     x29, sp */
      0xf801c3f3, /* str     x19, [sp, #28] */
      0xb9401fa0, /* ldr     x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, func_end_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  bool func_addr_found
    = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr);

  if (func_addr_found)
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  limit_pc
    = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4);

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (frame_info_ptr this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].set_addr (0);
      cache->saved_regs[30].set_addr (8);
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (frame_info_ptr this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp;
  if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame),
					cache->prev_pc))
    cache->prev_sp += cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (cache->saved_regs[reg].is_addr ())
      cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
				       + cache->prev_sp);

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (frame_info_ptr this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  if (cache->prev_pc <= tdep->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (frame_info_ptr this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (frame_info_ptr this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      aarch64_gdbarch_tdep *tdep
	= gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ())
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the stub frame
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (frame_info_ptr this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (frame_info_ptr this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     frame_info_ptr this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable, to
	 avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (frame_info_ptr this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (frame_info_ptr this_frame,
			      void **this_cache, int regnum)
{
  gdbarch *arch = get_frame_arch (this_frame);
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (_("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       frame_info_ptr this_frame)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->ra_sign_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum >= tdep->pauth_reg_base
	       && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count)
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
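
/* Note: compilers emit DW_CFA_AARCH64_negate_ra_state (via the
   .cfi_negate_ra_state directive) just after the paciasp/pacibsp and
   autiasp/autibsp instructions, so while unwinding through the signed
   region RA_STATE evaluates to 1 and the unwinder knows to strip the PAC
   from the saved LR.  */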

/* Used for matching BRK instructions for AArch64.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;

/* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */

static bool
aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
{
  const uint32_t insn_len = 4;
  gdb_byte target_mem[4];

  /* Enable the automatic memory restoration from breakpoints while
     we read the memory.  Otherwise we may find temporary breakpoints, ones
     inserted by GDB, and flag them as permanent breakpoints.  */
  scoped_restore restore_memory
    = make_scoped_restore_show_memory_breakpoints (0);

  if (target_read_memory (address, target_mem, insn_len) == 0)
    {
      uint32_t insn =
	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
					     gdbarch_byte_order_for_code (gdbarch));

      /* Check if INSN is a BRK instruction pattern.  There are multiple
	 choices of such instructions with different immediate values.
	 Different OS' may use a different variation, but they have the
	 same outcome.  */
      return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
    }

  return false;
}
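
/* For example, BRK #0 encodes as 0xd4200000: the 16-bit immediate lives in
   bits [20:5] of the instruction, which is exactly the field BRK_INSN_MASK
   clears before the comparison above.  */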

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (t->length () > 16)
	return 16;
      else
	return t->length ();
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
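
/* For example, under this rule a 32-byte vector type is capped at 16-byte
   (128-bit) alignment, while an 8-byte vector keeps its natural 8-byte
   alignment.  */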

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      if (type->length () > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (type->length () != (*fundamental_type)->length ()
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (type->target_type ());
	if (target_type->length () > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (target_type->length () != (*fundamental_type)->length ()
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    if (type->length () != 8 && type->length () != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (type->length () != (*fundamental_type)->length ()
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = type->target_type ();
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (type->length () / target_type->length ());
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (type->field (i).is_static ())
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			     ? 0 : (*fundamental_type)->length ();
	if (count * ftype_length != type->length ())
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
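
/* For example, "struct { float x, y, z; }" is an HFA: all three members are
   floats, so *COUNT is set to 3 and *FUNDAMENTAL_TYPE to float.  A fourth
   float member would still qualify (HA_MAX_NUM_FLDS is 4); a fifth member,
   or mixing float and double, would not.  */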

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = type->length ();
  enum type_code typecode = type->code ();
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = arg->contents ().data ();

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  info->nsrn = 8;
  return 0;
}
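
/* Note the failure path above pins nsrn at 8: per the AAPCS64, once a
   SIMD/FP argument has been allocated to the stack, no later argument may
   be back-filled into a V register.  */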

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = arg->contents ().data ();
  int len = type->length ();
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
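
/* Illustrative example: pushing a 12-byte struct with 8-byte alignment when
   nsaa is 0 appends a 12-byte item (nsaa becomes 12); since 12 is not a
   multiple of 8, a 4-byte padding item follows and nsaa ends up at 16.  */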

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = type->length ();
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
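
/* For example, a 24-byte struct needs nregs == 3; if info->ngrn is already
   6, only two X registers remain, so the whole argument goes onto the stack
   and ngrn is set to 8: the AAPCS64 does not back-fill general registers
   once an argument has been allocated to the stack.  */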

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and that there are
   enough spare V registers.  A return value of false is an error state, as
   the value will have been partially passed to the stack.  */
1824 static bool
1825 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1826 struct aarch64_call_info *info, struct type *arg_type,
1827 struct value *arg)
1828 {
1829 switch (arg_type->code ())
1830 {
1831 case TYPE_CODE_FLT:
1832 case TYPE_CODE_DECFLOAT:
1833 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1834 arg->contents ().data ());
1835 break;
1836
1837 case TYPE_CODE_COMPLEX:
1838 {
1839 const bfd_byte *buf = arg->contents ().data ();
1840 struct type *target_type = check_typedef (arg_type->target_type ());
1841
1842 if (!pass_in_v (gdbarch, regcache, info, target_type->length (),
1843 buf))
1844 return false;
1845
1846 return pass_in_v (gdbarch, regcache, info, target_type->length (),
1847 buf + target_type->length ());
1848 }
1849
1850 case TYPE_CODE_ARRAY:
1851 if (arg_type->is_vector ())
1852 return pass_in_v (gdbarch, regcache, info, arg_type->length (),
1853 arg->contents ().data ());
1854 /* fall through. */
1855
1856 case TYPE_CODE_STRUCT:
1857 case TYPE_CODE_UNION:
1858 for (int i = 0; i < arg_type->num_fields (); i++)
1859 {
1860 /* Don't include static fields. */
1861 if (arg_type->field (i).is_static ())
1862 continue;
1863
1864 struct value *field = arg->primitive_field (0, i, arg_type);
1865 struct type *field_type = check_typedef (field->type ());
1866
1867 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1868 field))
1869 return false;
1870 }
1871 return true;
1872
1873 default:
1874 return false;
1875 }
1876 }
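
/* An example of the recursion above (illustrative): for an HFA such as
"struct { double x, y, z; }", the TYPE_CODE_STRUCT case calls pass_in_v
once per field, so the three doubles land in the low 8 bytes of v0, v1
and v2. A complex value is handled likewise: real part first, then the
imaginary part in the next V register. */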
1877
1878 /* Implement the "push_dummy_call" gdbarch method. */
1879
1880 static CORE_ADDR
1881 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1882 struct regcache *regcache, CORE_ADDR bp_addr,
1883 int nargs,
1884 struct value **args, CORE_ADDR sp,
1885 function_call_return_method return_method,
1886 CORE_ADDR struct_addr)
1887 {
1888 int argnum;
1889 struct aarch64_call_info info;
1890
1891 /* We need to know what the type of the called function is in order
1892 to determine the number of named/anonymous arguments for the
1893 actual argument placement, and the return type in order to handle
1894 the return value correctly.
1895
1896 The generic code above us views the decision of return in memory
1897 or return in registers as a two stage process. The language
1898 handler is consulted first and may decide to return in memory (e.g. a
1899 class with a copy constructor returned by value); this will cause
1900 the generic code to allocate space AND insert an initial leading
1901 argument.
1902
1903 If the language code does not decide to pass in memory then the
1904 target code is consulted.
1905
1906 If the language code decides to pass in memory we want to move
1907 the pointer inserted as the initial argument from the argument
1908 list and into X8, the conventional AArch64 struct return pointer
1909 register. */
1910
1911 /* Set the return address. For the AArch64, the return breakpoint
1912 is always at BP_ADDR. */
1913 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1914
1915 /* If we were given an initial argument for the return slot, lose it. */
1916 if (return_method == return_method_hidden_param)
1917 {
1918 args++;
1919 nargs--;
1920 }
1921
1922 /* The struct_return pointer occupies X8. */
1923 if (return_method != return_method_normal)
1924 {
1925 aarch64_debug_printf ("struct return in %s = 0x%s",
1926 gdbarch_register_name
1927 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1928 paddress (gdbarch, struct_addr));
1929
1930 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1931 struct_addr);
1932 }
1933
1934 for (argnum = 0; argnum < nargs; argnum++)
1935 {
1936 struct value *arg = args[argnum];
1937 struct type *arg_type, *fundamental_type;
1938 int len, elements;
1939
1940 arg_type = check_typedef (arg->type ());
1941 len = arg_type->length ();
1942
1943 /* If arg can be passed in v registers as per the AAPCS64, then do so
1944 if there are enough spare registers. */
1945 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1946 &fundamental_type))
1947 {
1948 if (info.nsrn + elements <= 8)
1949 {
1950 /* We know that we have sufficient registers available, therefore
1951 this will never need to fall back to the stack. */
1952 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1953 arg))
1954 gdb_assert_not_reached ("Failed to push args");
1955 }
1956 else
1957 {
1958 info.nsrn = 8;
1959 pass_on_stack (&info, arg_type, arg);
1960 }
1961 continue;
1962 }
1963
1964 switch (arg_type->code ())
1965 {
1966 case TYPE_CODE_INT:
1967 case TYPE_CODE_BOOL:
1968 case TYPE_CODE_CHAR:
1969 case TYPE_CODE_RANGE:
1970 case TYPE_CODE_ENUM:
1971 if (len < 4 && !is_fixed_point_type (arg_type))
1972 {
1973 /* Promote to 32 bit integer. */
1974 if (arg_type->is_unsigned ())
1975 arg_type = builtin_type (gdbarch)->builtin_uint32;
1976 else
1977 arg_type = builtin_type (gdbarch)->builtin_int32;
1978 arg = value_cast (arg_type, arg);
1979 }
1980 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1981 break;
1982
1983 case TYPE_CODE_STRUCT:
1984 case TYPE_CODE_ARRAY:
1985 case TYPE_CODE_UNION:
1986 if (len > 16)
1987 {
1988 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1989 invisible reference. */
1990
1991 /* Allocate aligned storage. */
1992 sp = align_down (sp - len, 16);
1993
1994 /* Write the real data into the stack. */
1995 write_memory (sp, arg->contents ().data (), len);
1996
1997 /* Construct the indirection. */
1998 arg_type = lookup_pointer_type (arg_type);
1999 arg = value_from_pointer (arg_type, sp);
2000 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2001 }
2002 else
2003 /* PCS C.15 / C.18 - pass as multiple values in registers or on the stack. */
2004 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2005 break;
2006
2007 default:
2008 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
2009 break;
2010 }
2011 }
2012
2013 /* Make sure stack retains 16 byte alignment. */
2014 if (info.nsaa & 15)
2015 sp -= 16 - (info.nsaa & 15);
2016
2017 while (!info.si.empty ())
2018 {
2019 const stack_item_t &si = info.si.back ();
2020
2021 sp -= si.len;
2022 if (si.data != NULL)
2023 write_memory (sp, si.data, si.len);
2024 info.si.pop_back ();
2025 }
2026
2027 /* Finally, update the SP register. */
2028 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
2029
2030 return sp;
2031 }
2032
2033 /* Implement the "frame_align" gdbarch method. */
2034
2035 static CORE_ADDR
2036 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2037 {
2038 /* Align the stack to sixteen bytes. */
2039 return sp & ~(CORE_ADDR) 15;
2040 }
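
/* For example, sp == 0x7ffffffffe6c aligns down to 0x7ffffffffe60,
while an already 16-byte-aligned sp is returned unchanged. */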
2041
2042 /* Return the type for an AdvSIMD Q register. */
2043
2044 static struct type *
2045 aarch64_vnq_type (struct gdbarch *gdbarch)
2046 {
2047 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2048
2049 if (tdep->vnq_type == NULL)
2050 {
2051 struct type *t;
2052 struct type *elem;
2053
2054 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2055 TYPE_CODE_UNION);
2056
2057 elem = builtin_type (gdbarch)->builtin_uint128;
2058 append_composite_type_field (t, "u", elem);
2059
2060 elem = builtin_type (gdbarch)->builtin_int128;
2061 append_composite_type_field (t, "s", elem);
2062
2063 tdep->vnq_type = t;
2064 }
2065
2066 return tdep->vnq_type;
2067 }
2068
2069 /* Return the type for an AdvSIMD D register. */
2070
2071 static struct type *
2072 aarch64_vnd_type (struct gdbarch *gdbarch)
2073 {
2074 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2075
2076 if (tdep->vnd_type == NULL)
2077 {
2078 struct type *t;
2079 struct type *elem;
2080
2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2082 TYPE_CODE_UNION);
2083
2084 elem = builtin_type (gdbarch)->builtin_double;
2085 append_composite_type_field (t, "f", elem);
2086
2087 elem = builtin_type (gdbarch)->builtin_uint64;
2088 append_composite_type_field (t, "u", elem);
2089
2090 elem = builtin_type (gdbarch)->builtin_int64;
2091 append_composite_type_field (t, "s", elem);
2092
2093 tdep->vnd_type = t;
2094 }
2095
2096 return tdep->vnd_type;
2097 }
2098
2099 /* Return the type for an AdvSIMD S register. */
2100
2101 static struct type *
2102 aarch64_vns_type (struct gdbarch *gdbarch)
2103 {
2104 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2105
2106 if (tdep->vns_type == NULL)
2107 {
2108 struct type *t;
2109 struct type *elem;
2110
2111 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2112 TYPE_CODE_UNION);
2113
2114 elem = builtin_type (gdbarch)->builtin_float;
2115 append_composite_type_field (t, "f", elem);
2116
2117 elem = builtin_type (gdbarch)->builtin_uint32;
2118 append_composite_type_field (t, "u", elem);
2119
2120 elem = builtin_type (gdbarch)->builtin_int32;
2121 append_composite_type_field (t, "s", elem);
2122
2123 tdep->vns_type = t;
2124 }
2125
2126 return tdep->vns_type;
2127 }
2128
2129 /* Return the type for an AdvSIMD H register. */
2130
2131 static struct type *
2132 aarch64_vnh_type (struct gdbarch *gdbarch)
2133 {
2134 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2135
2136 if (tdep->vnh_type == NULL)
2137 {
2138 struct type *t;
2139 struct type *elem;
2140
2141 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2142 TYPE_CODE_UNION);
2143
2144 elem = builtin_type (gdbarch)->builtin_bfloat16;
2145 append_composite_type_field (t, "bf", elem);
2146
2147 elem = builtin_type (gdbarch)->builtin_half;
2148 append_composite_type_field (t, "f", elem);
2149
2150 elem = builtin_type (gdbarch)->builtin_uint16;
2151 append_composite_type_field (t, "u", elem);
2152
2153 elem = builtin_type (gdbarch)->builtin_int16;
2154 append_composite_type_field (t, "s", elem);
2155
2156 tdep->vnh_type = t;
2157 }
2158
2159 return tdep->vnh_type;
2160 }
2161
2162 /* Return the type for an AdvSIMD B register. */
2163
2164 static struct type *
2165 aarch64_vnb_type (struct gdbarch *gdbarch)
2166 {
2167 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2168
2169 if (tdep->vnb_type == NULL)
2170 {
2171 struct type *t;
2172 struct type *elem;
2173
2174 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2175 TYPE_CODE_UNION);
2176
2177 elem = builtin_type (gdbarch)->builtin_uint8;
2178 append_composite_type_field (t, "u", elem);
2179
2180 elem = builtin_type (gdbarch)->builtin_int8;
2181 append_composite_type_field (t, "s", elem);
2182
2183 tdep->vnb_type = t;
2184 }
2185
2186 return tdep->vnb_type;
2187 }
2188
2189 /* Return TRUE if REGNUM is a ZA tile slice pseudo-register number. Return
2190 FALSE otherwise. */
2191
2192 static bool
2193 is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum)
2194 {
2195 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2196
2197 gdb_assert (tdep->has_sme ());
2198 gdb_assert (tdep->sme_svq > 0);
2199 gdb_assert (tdep->sme_pseudo_base <= regnum);
2200 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2201
2202 if (tdep->sme_tile_slice_pseudo_base <= regnum
2203 && regnum < tdep->sme_tile_slice_pseudo_base
2204 + tdep->sme_tile_slice_pseudo_count)
2205 return true;
2206
2207 return false;
2208 }
2209
2210 /* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the
2211 decoded fields that make up its name. */
2212
2213 static void
2214 aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum,
2215 struct za_pseudo_encoding &encoding)
2216 {
2217 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2218
2219 gdb_assert (tdep->has_sme ());
2220 gdb_assert (tdep->sme_svq > 0);
2221 gdb_assert (tdep->sme_pseudo_base <= regnum);
2222 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
2223
2224 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2225 {
2226 /* Calculate the tile slice pseudo-register offset relative to the other
2227 tile slice pseudo-registers. */
2228 int offset = regnum - tdep->sme_tile_slice_pseudo_base;
2229
2230 /* Fetch the qualifier. We can have 160 to 2560 possible tile slice
2231 pseudo-registers. Each qualifier (we have 5 of them: B, H, S, D
2232 and Q) covers 32 * svq pseudo-registers, so we divide the offset by
2233 that constant. */
2234 size_t qualifier = offset / (tdep->sme_svq * 32);
2235 encoding.qualifier_index = qualifier;
2236
2237 /* Prepare to fetch the direction (d), tile number (t) and slice
2238 number (s). */
2239 int dts = offset % (tdep->sme_svq * 32);
2240
2241 /* The direction is represented by the even/odd numbers. Even-numbered
2242 pseudo-registers are horizontal tile slices and odd-numbered
2243 pseudo-registers are vertical tile slices. */
2244 encoding.horizontal = !(dts & 1);
2245
2246 /* Fetch the tile number. The tile number is closely related to the
2247 qualifier. B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8 tiles
2248 and Q has 16 tiles. */
2249 encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1);
2250
2251 /* Fetch the slice number. The slice number is closely related to the
2252 qualifier and the svl. */
2253 encoding.slice_index = dts >> (qualifier + 1);
2254 }
2255 else
2256 {
2257 /* Calculate the tile pseudo-register offset relative to the other
2258 tile pseudo-registers. */
2259 int offset = regnum - tdep->sme_tile_pseudo_base;
2260
2261 encoding.qualifier_index = std::floor (std::log2 (offset + 1));
2262 /* Calculate the tile number. */
2263 encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index);
2264 /* Direction and slice index don't get used for tiles. Set them to
2265 0/false values. */
2266 encoding.slice_index = 0;
2267 encoding.horizontal = false;
2268 }
2269 }
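
/* A standalone sketch (illustrative, not part of GDB) of the tile
slice decoding above, with the tdep bookkeeping stripped away. */

static void
za_slice_decode_sketch (int offset, int svq,
struct za_pseudo_encoding *enc)
{
size_t qualifier = offset / (svq * 32);
int dts = offset % (svq * 32);

enc->qualifier_index = qualifier;
enc->horizontal = !(dts & 1);
enc->tile_index = (dts >> 1) & ((1 << qualifier) - 1);
enc->slice_index = dts >> (qualifier + 1);

/* E.g. offset == 37 with svq == 1: qualifier 1 (H), dts == 5, odd and
therefore vertical, tile (5 >> 1) & 1 == 0, slice 5 >> 2 == 1 - the
register named "za0vh1". */
}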
2270
2271 /* Return the type for a ZA tile slice pseudo-register based on ENCODING. */
2272
2273 static struct type *
2274 aarch64_za_tile_slice_type (struct gdbarch *gdbarch,
2275 const struct za_pseudo_encoding &encoding)
2276 {
2277 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2278
2279 gdb_assert (tdep->has_sme ());
2280 gdb_assert (tdep->sme_svq > 0);
2281
2282 if (tdep->sme_tile_slice_type_q == nullptr)
2283 {
2284 /* Q tile slice type. */
2285 tdep->sme_tile_slice_type_q
2286 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2287 tdep->sme_svq);
2288 /* D tile slice type. */
2289 tdep->sme_tile_slice_type_d
2290 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2291 tdep->sme_svq * 2);
2292 /* S tile slice type. */
2293 tdep->sme_tile_slice_type_s
2294 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2295 tdep->sme_svq * 4);
2296 /* H tile slice type. */
2297 tdep->sme_tile_slice_type_h
2298 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2299 tdep->sme_svq * 8);
2300 /* B tile slice type. */
2301 tdep->sme_tile_slice_type_b
2302 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2303 tdep->sme_svq * 16);
2304 }
2305
2306 switch (encoding.qualifier_index)
2307 {
2308 case 4:
2309 return tdep->sme_tile_slice_type_q;
2310 case 3:
2311 return tdep->sme_tile_slice_type_d;
2312 case 2:
2313 return tdep->sme_tile_slice_type_s;
2314 case 1:
2315 return tdep->sme_tile_slice_type_h;
2316 case 0:
2317 return tdep->sme_tile_slice_type_b;
2318 default:
2319 error (_("Invalid qualifier index %s for tile slice pseudo register."),
2320 pulongest (encoding.qualifier_index));
2321 }
2322
2323 gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register");
2324 }
2325
2326 /* Return the type for a ZA tile pseudo-register based on ENCODING. */
2327
2328 static struct type *
2329 aarch64_za_tile_type (struct gdbarch *gdbarch,
2330 const struct za_pseudo_encoding &encoding)
2331 {
2332 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2333
2334 gdb_assert (tdep->has_sme ());
2335 gdb_assert (tdep->sme_svq > 0);
2336
2337 if (tdep->sme_tile_type_q == nullptr)
2338 {
2339 struct type *inner_vectors_type;
2340
2341 /* Q tile type. */
2342 inner_vectors_type
2343 = init_vector_type (builtin_type (gdbarch)->builtin_uint128,
2344 tdep->sme_svq);
2345 tdep->sme_tile_type_q
2346 = init_vector_type (inner_vectors_type, tdep->sme_svq);
2347
2348 /* D tile type. */
2349 inner_vectors_type
2350 = init_vector_type (builtin_type (gdbarch)->builtin_uint64,
2351 tdep->sme_svq * 2);
2352 tdep->sme_tile_type_d
2353 = init_vector_type (inner_vectors_type, tdep->sme_svq * 2);
2354
2355 /* S tile type. */
2356 inner_vectors_type
2357 = init_vector_type (builtin_type (gdbarch)->builtin_uint32,
2358 tdep->sme_svq * 4);
2359 tdep->sme_tile_type_s
2360 = init_vector_type (inner_vectors_type, tdep->sme_svq * 4);
2361
2362 /* H tile type. */
2363 inner_vectors_type
2364 = init_vector_type (builtin_type (gdbarch)->builtin_uint16,
2365 tdep->sme_svq * 8);
2366 tdep->sme_tile_type_h
2367 = init_vector_type (inner_vectors_type, tdep->sme_svq * 8);
2368
2369 /* B tile type. */
2370 inner_vectors_type
2371 = init_vector_type (builtin_type (gdbarch)->builtin_uint8,
2372 tdep->sme_svq * 16);
2373 tdep->sme_tile_type_b
2374 = init_vector_type (inner_vectors_type, tdep->sme_svq * 16);
2375 }
2376
2377 switch (encoding.qualifier_index)
2378 {
2379 case 4:
2380 return tdep->sme_tile_type_q;
2381 case 3:
2382 return tdep->sme_tile_type_d;
2383 case 2:
2384 return tdep->sme_tile_type_s;
2385 case 1:
2386 return tdep->sme_tile_type_h;
2387 case 0:
2388 return tdep->sme_tile_type_b;
2389 default:
2390 error (_("Invalid qualifier index %s for ZA tile pseudo register."),
2391 pulongest (encoding.qualifier_index));
2392 }
2393
2394 gdb_assert_not_reached ("unknown qualifier for tile pseudo-register");
2395 }
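
/* To make the shapes above concrete (illustrative only): with svq == 2
(a 32-byte streaming vector length) the B tile is a 32x32 matrix of
uint8 covering all svl * svl == 1024 bytes of ZA, each of the four S
tiles is an 8x8 matrix of uint32 covering a quarter of ZA, and each of
the eight D tiles is a 4x4 matrix of uint64. */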
2396
2397 /* Return the type for an AdvSIMD V register. */
2398
2399 static struct type *
2400 aarch64_vnv_type (struct gdbarch *gdbarch)
2401 {
2402 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2403
2404 if (tdep->vnv_type == NULL)
2405 {
2406 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2407 slice from the non-pseudo vector registers. However NEON V registers
2408 are always vector registers, and need constructing as such. */
2409 const struct builtin_type *bt = builtin_type (gdbarch);
2410
2411 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2412 TYPE_CODE_UNION);
2413
2414 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2415 TYPE_CODE_UNION);
2416 append_composite_type_field (sub, "f",
2417 init_vector_type (bt->builtin_double, 2));
2418 append_composite_type_field (sub, "u",
2419 init_vector_type (bt->builtin_uint64, 2));
2420 append_composite_type_field (sub, "s",
2421 init_vector_type (bt->builtin_int64, 2));
2422 append_composite_type_field (t, "d", sub);
2423
2424 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2425 TYPE_CODE_UNION);
2426 append_composite_type_field (sub, "f",
2427 init_vector_type (bt->builtin_float, 4));
2428 append_composite_type_field (sub, "u",
2429 init_vector_type (bt->builtin_uint32, 4));
2430 append_composite_type_field (sub, "s",
2431 init_vector_type (bt->builtin_int32, 4));
2432 append_composite_type_field (t, "s", sub);
2433
2434 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2435 TYPE_CODE_UNION);
2436 append_composite_type_field (sub, "bf",
2437 init_vector_type (bt->builtin_bfloat16, 8));
2438 append_composite_type_field (sub, "f",
2439 init_vector_type (bt->builtin_half, 8));
2440 append_composite_type_field (sub, "u",
2441 init_vector_type (bt->builtin_uint16, 8));
2442 append_composite_type_field (sub, "s",
2443 init_vector_type (bt->builtin_int16, 8));
2444 append_composite_type_field (t, "h", sub);
2445
2446 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2447 TYPE_CODE_UNION);
2448 append_composite_type_field (sub, "u",
2449 init_vector_type (bt->builtin_uint8, 16));
2450 append_composite_type_field (sub, "s",
2451 init_vector_type (bt->builtin_int8, 16));
2452 append_composite_type_field (t, "b", sub);
2453
2454 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2455 TYPE_CODE_UNION);
2456 append_composite_type_field (sub, "u",
2457 init_vector_type (bt->builtin_uint128, 1));
2458 append_composite_type_field (sub, "s",
2459 init_vector_type (bt->builtin_int128, 1));
2460 append_composite_type_field (t, "q", sub);
2461
2462 tdep->vnv_type = t;
2463 }
2464
2465 return tdep->vnv_type;
2466 }
2467
2468 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2469
2470 static int
2471 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2472 {
2473 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2474
2475 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2476 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2477
2478 if (reg == AARCH64_DWARF_SP)
2479 return AARCH64_SP_REGNUM;
2480
2481 if (reg == AARCH64_DWARF_PC)
2482 return AARCH64_PC_REGNUM;
2483
2484 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2485 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2486
2487 if (reg == AARCH64_DWARF_SVE_VG)
2488 return AARCH64_SVE_VG_REGNUM;
2489
2490 if (reg == AARCH64_DWARF_SVE_FFR)
2491 return AARCH64_SVE_FFR_REGNUM;
2492
2493 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2494 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2495
2496 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2497 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2498
2499 if (tdep->has_pauth ())
2500 {
2501 if (reg == AARCH64_DWARF_RA_SIGN_STATE)
2502 return tdep->ra_sign_state_regnum;
2503 }
2504
2505 return -1;
2506 }
2507
2508 /* Implement the "print_insn" gdbarch method. */
2509
2510 static int
2511 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2512 {
2513 info->symbols = NULL;
2514 return default_print_insn (memaddr, info);
2515 }
2516
2517 /* AArch64 BRK software debug mode instruction.
2518 Note that AArch64 code is always little-endian.
2519 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2520 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2521
2522 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2523
2524 /* Extract from an array REGS containing the (raw) register state a
2525 function return value of type TYPE, and copy that, in virtual
2526 format, into VALBUF. */
2527
2528 static void
2529 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2530 gdb_byte *valbuf)
2531 {
2532 struct gdbarch *gdbarch = regs->arch ();
2533 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2534 int elements;
2535 struct type *fundamental_type;
2536
2537 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2538 &fundamental_type))
2539 {
2540 int len = fundamental_type->length ();
2541
2542 for (int i = 0; i < elements; i++)
2543 {
2544 int regno = AARCH64_V0_REGNUM + i;
2545 /* Enough space for a full vector register. */
2546 gdb_byte buf[register_size (gdbarch, regno)];
2547 gdb_assert (len <= sizeof (buf));
2548
2549 aarch64_debug_printf
2550 ("read HFA or HVA return value element %d from %s",
2551 i + 1, gdbarch_register_name (gdbarch, regno));
2552
2553 regs->cooked_read (regno, buf);
2554
2555 memcpy (valbuf, buf, len);
2556 valbuf += len;
2557 }
2558 }
2559 else if (type->code () == TYPE_CODE_INT
2560 || type->code () == TYPE_CODE_CHAR
2561 || type->code () == TYPE_CODE_BOOL
2562 || type->code () == TYPE_CODE_PTR
2563 || TYPE_IS_REFERENCE (type)
2564 || type->code () == TYPE_CODE_ENUM)
2565 {
2566 /* If the type is a plain integer, then the access is
2567 straightforward. Otherwise we have to play around a bit
2568 more. */
2569 int len = type->length ();
2570 int regno = AARCH64_X0_REGNUM;
2571 ULONGEST tmp;
2572
2573 while (len > 0)
2574 {
2575 /* By using store_unsigned_integer we avoid having to do
2576 anything special for small big-endian values. */
2577 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2578 store_unsigned_integer (valbuf,
2579 (len > X_REGISTER_SIZE
2580 ? X_REGISTER_SIZE : len), byte_order, tmp);
2581 len -= X_REGISTER_SIZE;
2582 valbuf += X_REGISTER_SIZE;
2583 }
2584 }
2585 else
2586 {
2587 /* For a structure or union the behaviour is as if the value had
2588 been stored to word-aligned memory and then loaded into
2589 registers with 64-bit load instruction(s). */
2590 int len = type->length ();
2591 int regno = AARCH64_X0_REGNUM;
2592 bfd_byte buf[X_REGISTER_SIZE];
2593
2594 while (len > 0)
2595 {
2596 regs->cooked_read (regno++, buf);
2597 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2598 len -= X_REGISTER_SIZE;
2599 valbuf += X_REGISTER_SIZE;
2600 }
2601 }
2602 }
2603
2604
2605 /* Will a function return an aggregate type in memory or in a
2606 register? Return 0 if an aggregate type can be returned in a
2607 register, 1 if it must be returned in memory. */
2608
2609 static int
2610 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2611 {
2612 type = check_typedef (type);
2613 int elements;
2614 struct type *fundamental_type;
2615
2616 if (TYPE_HAS_DYNAMIC_LENGTH (type))
2617 return 1;
2618
2619 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2620 &fundamental_type))
2621 {
2622 /* v0-v7 are used to return values and one register is allocated
2623 per member. However, an HFA or HVA has at most four members. */
2624 return 0;
2625 }
2626
2627 if (type->length () > 16
2628 || !language_pass_by_reference (type).trivially_copyable)
2629 {
2630 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2631 invisible reference. */
2632
2633 return 1;
2634 }
2635
2636 return 0;
2637 }
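
/* For instance (illustrative): "struct { float x, y, z; }" is an HFA
with three members, so it is returned in V registers and this function
returns 0, while a plain 24-byte structure exceeds the 16 byte limit
and is returned in memory via x8, so this function returns 1. */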
2638
2639 /* Write into appropriate registers a function return value of type
2640 TYPE, given in virtual format. */
2641
2642 static void
2643 aarch64_store_return_value (struct type *type, struct regcache *regs,
2644 const gdb_byte *valbuf)
2645 {
2646 struct gdbarch *gdbarch = regs->arch ();
2647 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2648 int elements;
2649 struct type *fundamental_type;
2650
2651 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2652 &fundamental_type))
2653 {
2654 int len = fundamental_type->length ();
2655
2656 for (int i = 0; i < elements; i++)
2657 {
2658 int regno = AARCH64_V0_REGNUM + i;
2659 /* Enough space for a full vector register. */
2660 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2661 gdb_assert (len <= sizeof (tmpbuf));
2662
2663 aarch64_debug_printf
2664 ("write HFA or HVA return value element %d to %s",
2665 i + 1, gdbarch_register_name (gdbarch, regno));
2666
2667 /* Depending on whether the target supports SVE or not, the V
2668 registers may report a size > 16 bytes. In that case, read the
2669 original contents of the register before overriding it with a new
2670 value that has a potential size <= 16 bytes. */
2671 regs->cooked_read (regno, tmpbuf);
2672 memcpy (tmpbuf, valbuf,
2673 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2674 regs->cooked_write (regno, tmpbuf);
2675 valbuf += len;
2676 }
2677 }
2678 else if (type->code () == TYPE_CODE_INT
2679 || type->code () == TYPE_CODE_CHAR
2680 || type->code () == TYPE_CODE_BOOL
2681 || type->code () == TYPE_CODE_PTR
2682 || TYPE_IS_REFERENCE (type)
2683 || type->code () == TYPE_CODE_ENUM)
2684 {
2685 if (type->length () <= X_REGISTER_SIZE)
2686 {
2687 /* Values of one word or less are zero/sign-extended and
2688 returned in x0. */
2689 bfd_byte tmpbuf[X_REGISTER_SIZE];
2690 LONGEST val = unpack_long (type, valbuf);
2691
2692 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2693 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2694 }
2695 else
2696 {
2697 /* Integral values greater than one word are stored in
2698 consecutive registers starting with x0. This will always
2699 be a multiple of the register size. */
2700 int len = type->length ();
2701 int regno = AARCH64_X0_REGNUM;
2702
2703 while (len > 0)
2704 {
2705 regs->cooked_write (regno++, valbuf);
2706 len -= X_REGISTER_SIZE;
2707 valbuf += X_REGISTER_SIZE;
2708 }
2709 }
2710 }
2711 else
2712 {
2713 /* For a structure or union the behaviour is as if the value had
2714 been stored to word-aligned memory and then loaded into
2715 registers with 64-bit load instruction(s). */
2716 int len = type->length ();
2717 int regno = AARCH64_X0_REGNUM;
2718 bfd_byte tmpbuf[X_REGISTER_SIZE];
2719
2720 while (len > 0)
2721 {
2722 memcpy (tmpbuf, valbuf,
2723 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2724 regs->cooked_write (regno++, tmpbuf);
2725 len -= X_REGISTER_SIZE;
2726 valbuf += X_REGISTER_SIZE;
2727 }
2728 }
2729 }
2730
2731 /* Implement the "return_value" gdbarch method. */
2732
2733 static enum return_value_convention
2734 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2735 struct type *valtype, struct regcache *regcache,
2736 struct value **read_value, const gdb_byte *writebuf)
2737 {
2738 if (valtype->code () == TYPE_CODE_STRUCT
2739 || valtype->code () == TYPE_CODE_UNION
2740 || valtype->code () == TYPE_CODE_ARRAY)
2741 {
2742 if (aarch64_return_in_memory (gdbarch, valtype))
2743 {
2744 /* From the AAPCS64's Result Return section:
2745
2746 "Otherwise, the caller shall reserve a block of memory of
2747 sufficient size and alignment to hold the result. The address
2748 of the memory block shall be passed as an additional argument to
2749 the function in x8." */
2750
2751 aarch64_debug_printf ("return value in memory");
2752
2753 if (read_value != nullptr)
2754 {
2755 CORE_ADDR addr;
2756
2757 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
2758 *read_value = value_at_non_lval (valtype, addr);
2759 }
2760
2761 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
2762 }
2763 }
2764
2765 if (writebuf)
2766 aarch64_store_return_value (valtype, regcache, writebuf);
2767
2768 if (read_value)
2769 {
2770 *read_value = value::allocate (valtype);
2771 aarch64_extract_return_value (valtype, regcache,
2772 (*read_value)->contents_raw ().data ());
2773 }
2774
2775 aarch64_debug_printf ("return value in registers");
2776
2777 return RETURN_VALUE_REGISTER_CONVENTION;
2778 }
2779
2780 /* Implement the "get_longjmp_target" gdbarch method. */
2781
2782 static int
2783 aarch64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
2784 {
2785 CORE_ADDR jb_addr;
2786 gdb_byte buf[X_REGISTER_SIZE];
2787 struct gdbarch *gdbarch = get_frame_arch (frame);
2788 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2789 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2790
2791 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2792
2793 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2794 X_REGISTER_SIZE))
2795 return 0;
2796
2797 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2798 return 1;
2799 }
2800
2801 /* Implement the "gen_return_address" gdbarch method. */
2802
2803 static void
2804 aarch64_gen_return_address (struct gdbarch *gdbarch,
2805 struct agent_expr *ax, struct axs_value *value,
2806 CORE_ADDR scope)
2807 {
2808 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2809 value->kind = axs_lvalue_register;
2810 value->u.reg = AARCH64_LR_REGNUM;
2811 }
2812 \f
2813
2814 /* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE
2815 otherwise. */
2816
2817 static bool
2818 is_w_pseudo_register (struct gdbarch *gdbarch, int regnum)
2819 {
2820 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2821
2822 if (tdep->w_pseudo_base <= regnum
2823 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count)
2824 return true;
2825
2826 return false;
2827 }
2828
2829 /* Return TRUE if REGNUM is a SME pseudo-register number. Return FALSE
2830 otherwise. */
2831
2832 static bool
2833 is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum)
2834 {
2835 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2836
2837 if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum
2838 && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count)
2839 return true;
2840
2841 return false;
2842 }
2843
2844 /* Convert ENCODING into a ZA tile slice name. */
2845
2846 static const std::string
2847 aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding)
2848 {
2849 gdb_assert (encoding.qualifier_index >= 0);
2850 gdb_assert (encoding.qualifier_index <= 4);
2851 gdb_assert (encoding.tile_index >= 0);
2852 gdb_assert (encoding.tile_index <= 15);
2853 gdb_assert (encoding.slice_index >= 0);
2854 gdb_assert (encoding.slice_index <= 255);
2855
2856 const char orientation = encoding.horizontal ? 'h' : 'v';
2857
2858 const char qualifiers[6] = "bhsdq";
2859 const char qualifier = qualifiers [encoding.qualifier_index];
2860 return string_printf ("za%d%c%c%d", encoding.tile_index, orientation,
2861 qualifier, encoding.slice_index);
2862 }
2863
2864 /* Convert ENCODING into a ZA tile name. */
2865
2866 static const std::string
2867 aarch64_za_tile_name (const struct za_pseudo_encoding &encoding)
2868 {
2869 /* Tiles don't use the slice number and the direction fields. */
2870 gdb_assert (encoding.qualifier_index >= 0);
2871 gdb_assert (encoding.qualifier_index <= 4);
2872 gdb_assert (encoding.tile_index >= 0);
2873 gdb_assert (encoding.tile_index <= 15);
2874
2875 const char qualifiers[6] = "bhsdq";
2876 const char qualifier = qualifiers [encoding.qualifier_index];
2877 return (string_printf ("za%d%c", encoding.tile_index, qualifier));
2878 }
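
/* Putting the two together (illustrative): the encoding { qualifier
index 2 (S), tile 3, slice 5, horizontal } names the tile slice
"za3hs5", while the same qualifier and tile fields alone name the tile
"za3s". */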
2879
2880 /* Given a SME pseudo-register REGNUM, return its type. */
2881
2882 static struct type *
2883 aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2884 {
2885 struct za_pseudo_encoding encoding;
2886
2887 /* Decode the SME pseudo-register number. */
2888 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
2889
2890 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
2891 return aarch64_za_tile_slice_type (gdbarch, encoding);
2892 else
2893 return aarch64_za_tile_type (gdbarch, encoding);
2894 }
2895
2896 /* Return the pseudo register name corresponding to register regnum. */
2897
2898 static const char *
2899 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2900 {
2901 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
2902
2903 /* W pseudo-registers. Bottom halves of the X registers. */
2904 static const char *const w_name[] =
2905 {
2906 "w0", "w1", "w2", "w3",
2907 "w4", "w5", "w6", "w7",
2908 "w8", "w9", "w10", "w11",
2909 "w12", "w13", "w14", "w15",
2910 "w16", "w17", "w18", "w19",
2911 "w20", "w21", "w22", "w23",
2912 "w24", "w25", "w26", "w27",
2913 "w28", "w29", "w30",
2914 };
2915
2916 static const char *const q_name[] =
2917 {
2918 "q0", "q1", "q2", "q3",
2919 "q4", "q5", "q6", "q7",
2920 "q8", "q9", "q10", "q11",
2921 "q12", "q13", "q14", "q15",
2922 "q16", "q17", "q18", "q19",
2923 "q20", "q21", "q22", "q23",
2924 "q24", "q25", "q26", "q27",
2925 "q28", "q29", "q30", "q31",
2926 };
2927
2928 static const char *const d_name[] =
2929 {
2930 "d0", "d1", "d2", "d3",
2931 "d4", "d5", "d6", "d7",
2932 "d8", "d9", "d10", "d11",
2933 "d12", "d13", "d14", "d15",
2934 "d16", "d17", "d18", "d19",
2935 "d20", "d21", "d22", "d23",
2936 "d24", "d25", "d26", "d27",
2937 "d28", "d29", "d30", "d31",
2938 };
2939
2940 static const char *const s_name[] =
2941 {
2942 "s0", "s1", "s2", "s3",
2943 "s4", "s5", "s6", "s7",
2944 "s8", "s9", "s10", "s11",
2945 "s12", "s13", "s14", "s15",
2946 "s16", "s17", "s18", "s19",
2947 "s20", "s21", "s22", "s23",
2948 "s24", "s25", "s26", "s27",
2949 "s28", "s29", "s30", "s31",
2950 };
2951
2952 static const char *const h_name[] =
2953 {
2954 "h0", "h1", "h2", "h3",
2955 "h4", "h5", "h6", "h7",
2956 "h8", "h9", "h10", "h11",
2957 "h12", "h13", "h14", "h15",
2958 "h16", "h17", "h18", "h19",
2959 "h20", "h21", "h22", "h23",
2960 "h24", "h25", "h26", "h27",
2961 "h28", "h29", "h30", "h31",
2962 };
2963
2964 static const char *const b_name[] =
2965 {
2966 "b0", "b1", "b2", "b3",
2967 "b4", "b5", "b6", "b7",
2968 "b8", "b9", "b10", "b11",
2969 "b12", "b13", "b14", "b15",
2970 "b16", "b17", "b18", "b19",
2971 "b20", "b21", "b22", "b23",
2972 "b24", "b25", "b26", "b27",
2973 "b28", "b29", "b30", "b31",
2974 };
2975
2976 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2977
2978 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2979 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2980
2981 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2982 return d_name[p_regnum - AARCH64_D0_REGNUM];
2983
2984 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2985 return s_name[p_regnum - AARCH64_S0_REGNUM];
2986
2987 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2988 return h_name[p_regnum - AARCH64_H0_REGNUM];
2989
2990 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2991 return b_name[p_regnum - AARCH64_B0_REGNUM];
2992
2993 /* W pseudo-registers? */
2994 if (is_w_pseudo_register (gdbarch, regnum))
2995 return w_name[regnum - tdep->w_pseudo_base];
2996
2997 if (tdep->has_sve ())
2998 {
2999 static const char *const sve_v_name[] =
3000 {
3001 "v0", "v1", "v2", "v3",
3002 "v4", "v5", "v6", "v7",
3003 "v8", "v9", "v10", "v11",
3004 "v12", "v13", "v14", "v15",
3005 "v16", "v17", "v18", "v19",
3006 "v20", "v21", "v22", "v23",
3007 "v24", "v25", "v26", "v27",
3008 "v28", "v29", "v30", "v31",
3009 };
3010
3011 if (p_regnum >= AARCH64_SVE_V0_REGNUM
3012 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3013 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
3014 }
3015
3016 if (is_sme_pseudo_register (gdbarch, regnum))
3017 return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str ();
3018
3019 /* RA_STATE is used for unwinding only. Do not assign it a name - this
3020 prevents it from being read by methods such as
3021 mi_cmd_trace_frame_collected. */
3022 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3023 return "";
3024
3025 internal_error (_("aarch64_pseudo_register_name: bad register number %d"),
3026 p_regnum);
3027 }
3028
3029 /* Implement the "pseudo_register_type" gdbarch method. */
3030
3031 static struct type *
3032 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3033 {
3034 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3035
3036 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3037
3038 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3039 return aarch64_vnq_type (gdbarch);
3040
3041 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3042 return aarch64_vnd_type (gdbarch);
3043
3044 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3045 return aarch64_vns_type (gdbarch);
3046
3047 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3048 return aarch64_vnh_type (gdbarch);
3049
3050 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3051 return aarch64_vnb_type (gdbarch);
3052
3053 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3054 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3055 return aarch64_vnv_type (gdbarch);
3056
3057 /* W pseudo-registers are 32-bit. */
3058 if (is_w_pseudo_register (gdbarch, regnum))
3059 return builtin_type (gdbarch)->builtin_uint32;
3060
3061 if (is_sme_pseudo_register (gdbarch, regnum))
3062 return aarch64_sme_pseudo_register_type (gdbarch, regnum);
3063
3064 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3065 return builtin_type (gdbarch)->builtin_uint64;
3066
3067 internal_error (_("aarch64_pseudo_register_type: bad register number %d"),
3068 p_regnum);
3069 }
3070
3071 /* Implement the "pseudo_register_reggroup_p" gdbarch method. */
3072
3073 static int
3074 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
3075 const struct reggroup *group)
3076 {
3077 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3078
3079 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
3080
3081 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
3082 return group == all_reggroup || group == vector_reggroup;
3083 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
3084 return (group == all_reggroup || group == vector_reggroup
3085 || group == float_reggroup);
3086 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
3087 return (group == all_reggroup || group == vector_reggroup
3088 || group == float_reggroup);
3089 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
3090 return group == all_reggroup || group == vector_reggroup;
3091 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
3092 return group == all_reggroup || group == vector_reggroup;
3093 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
3094 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
3095 return group == all_reggroup || group == vector_reggroup;
3096 else if (is_sme_pseudo_register (gdbarch, regnum))
3097 return group == all_reggroup || group == vector_reggroup;
3098 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
3099 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum)
3100 return 0;
3101
3102 return group == all_reggroup;
3103 }
3104
3105 /* Helper for aarch64_pseudo_read_value. */
3106
3107 static struct value *
3108 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
3109 readable_regcache *regcache, int regnum_offset,
3110 int regsize, struct value *result_value)
3111 {
3112 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
3113
3114 /* Enough space for a full vector register. */
3115 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
3116 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
3117
3118 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
3119 result_value->mark_bytes_unavailable (0,
3120 result_value->type ()->length ());
3121 else
3122 memcpy (result_value->contents_raw ().data (), reg_buf, regsize);
3123
3124 return result_value;
3125 }
3126
3127 /* Helper function for reading/writing ZA pseudo-registers. Given REGNUM,
3128 a ZA pseudo-register number, return, in OFFSETS, the positioning
3129 information for the bytes that must be read or written. */
3130
3131 static void
3132 aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum,
3133 struct za_offsets &offsets)
3134 {
3135 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3136
3137 gdb_assert (tdep->has_sme ());
3138 gdb_assert (tdep->sme_svq > 0);
3139 gdb_assert (tdep->sme_pseudo_base <= regnum);
3140 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3141
3142 struct za_pseudo_encoding encoding;
3143
3144 /* Decode the ZA pseudo-register number. */
3145 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
3146
3147 /* Fetch the streaming vector length. */
3148 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3149
3150 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum))
3151 {
3152 if (encoding.horizontal)
3153 {
3154 /* Horizontal tile slices are contiguous ranges of svl bytes. */
3155
3156 /* The starting offset depends on the tile index (to locate the tile
3157 in the ZA buffer), the slice index (to locate the slice within the
3158 tile) and the qualifier. */
3159 offsets.starting_offset
3160 = encoding.tile_index * svl + encoding.slice_index
3161 * (svl >> encoding.qualifier_index);
3162 /* Horizontal tile slice data is contiguous and thus doesn't have
3163 a stride. */
3164 offsets.stride_size = 0;
3165 /* Horizontal tile slice data is contiguous and thus only has 1
3166 chunk. */
3167 offsets.chunks = 1;
3168 /* The chunk size is always svl bytes. */
3169 offsets.chunk_size = svl;
3170 }
3171 else
3172 {
3173 /* Vertical tile slices are non-contiguous ranges of
3174 (1 << qualifier_index) bytes. */
3175
3176 /* The starting offset depends on the tile number (to locate the
3177 tile in the ZA buffer), the slice index (to locate the element
3178 within the tile slice) and the qualifier. */
3179 offsets.starting_offset
3180 = encoding.tile_index * svl + encoding.slice_index
3181 * (1 << encoding.qualifier_index);
3182 /* The offset between vertical tile slices depends on the qualifier
3183 and svl. */
3184 offsets.stride_size = svl << encoding.qualifier_index;
3185 /* The number of chunks depends on svl and the qualifier size. */
3186 offsets.chunks = svl >> encoding.qualifier_index;
3187 /* The chunk size depends on the qualifier. */
3188 offsets.chunk_size = 1 << encoding.qualifier_index;
3189 }
3190 }
3191 else
3192 {
3193 /* ZA tile pseudo-register. */
3194
3195 /* Starting offset depends on the tile index and qualifier. */
3196 offsets.starting_offset = encoding.tile_index * svl;
3197 /* The offset between tile slices depends on the qualifier and svl. */
3198 offsets.stride_size = svl << encoding.qualifier_index;
3199 /* The number of chunks depends on the qualifier and svl. */
3200 offsets.chunks = svl >> encoding.qualifier_index;
3201 /* The chunk size is always svl bytes. */
3202 offsets.chunk_size = svl;
3203 }
3204 }
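
/* A worked example (illustrative): with svl == 32, a vertical D tile
slice (qualifier index 3) yields chunk_size == 1 << 3 == 8, chunks ==
32 >> 3 == 4 and stride_size == 32 << 3 == 256, i.e. four 8-byte pieces
spaced 256 bytes apart in the ZA buffer, whereas the corresponding
horizontal slice is a single contiguous 32-byte chunk. */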
3205
3206 /* Given REGNUM, a SME pseudo-register number, return its value in RESULT. */
3207
3208 static struct value *
3209 aarch64_sme_pseudo_register_read (struct gdbarch *gdbarch,
3210 readable_regcache *regcache, int regnum,
3211 struct value *result)
3212 {
3213 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3214
3215 gdb_assert (tdep->has_sme ());
3216 gdb_assert (tdep->sme_svq > 0);
3217 gdb_assert (tdep->sme_pseudo_base <= regnum);
3218 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3219
3220 /* Fetch the offsets that we need in order to read from the correct blocks
3221 of ZA. */
3222 struct za_offsets offsets;
3223 aarch64_za_offsets_from_regnum (gdbarch, regnum, offsets);
3224
3225 /* Fetch the contents of ZA. */
3226 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3227 gdb::byte_vector za (std::pow (svl, 2));
3228 regcache->raw_read (tdep->sme_za_regnum, za.data ());
3229
3230 /* Copy the requested data. */
3231 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3232 {
3233 const gdb_byte *source
3234 = za.data () + offsets.starting_offset + chunks * offsets.stride_size;
3235 gdb_byte *destination
3236 = result->contents_raw ().data () + chunks * offsets.chunk_size;
3237
3238 memcpy (destination, source, offsets.chunk_size);
3239 }
3240 return result;
3241 }
3242
3243 /* Implement the "pseudo_register_read_value" gdbarch method. */
3244
3245 static struct value *
3246 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
3247 int regnum)
3248 {
3249 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3250 struct value *result_value = value::allocate (register_type (gdbarch, regnum));
3251
3252 result_value->set_lval (lval_register);
3253 VALUE_REGNUM (result_value) = regnum;
3254
3255 if (is_w_pseudo_register (gdbarch, regnum))
3256 {
3257 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3258 /* Default offset for little endian. */
3259 int offset = 0;
3260
3261 if (byte_order == BFD_ENDIAN_BIG)
3262 offset = 4;
3263
3264 /* Find the correct X register to extract the data from. */
3265 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
3266 gdb_byte data[4];
3267
3268 /* Read the bottom 4 bytes of X. */
3269 if (regcache->raw_read_part (x_regnum, offset, 4, data) != REG_VALID)
3270 result_value->mark_bytes_unavailable (0, 4);
3271 else
3272 memcpy (result_value->contents_raw ().data (), data, 4);
3273
3274 return result_value;
3275 }
3276 else if (is_sme_pseudo_register (gdbarch, regnum))
3277 return aarch64_sme_pseudo_register_read (gdbarch, regcache, regnum,
3278 result_value);
3279
3280 regnum -= gdbarch_num_regs (gdbarch);
3281
3282 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
3283 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3284 regnum - AARCH64_Q0_REGNUM,
3285 Q_REGISTER_SIZE, result_value);
3286
3287 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
3288 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3289 regnum - AARCH64_D0_REGNUM,
3290 D_REGISTER_SIZE, result_value);
3291
3292 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
3293 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3294 regnum - AARCH64_S0_REGNUM,
3295 S_REGISTER_SIZE, result_value);
3296
3297 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
3298 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3299 regnum - AARCH64_H0_REGNUM,
3300 H_REGISTER_SIZE, result_value);
3301
3302 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
3303 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3304 regnum - AARCH64_B0_REGNUM,
3305 B_REGISTER_SIZE, result_value);
3306
3307 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
3308 && regnum < AARCH64_SVE_V0_REGNUM + 32)
3309 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
3310 regnum - AARCH64_SVE_V0_REGNUM,
3311 V_REGISTER_SIZE, result_value);
3312
3313 gdb_assert_not_reached ("regnum out of bounds");
3314 }
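
/* Note (illustrative, not in the original source): the 4-byte offset
used for W pseudo-registers above reflects byte order - on a
little-endian target the low 32 bits of an X register live at byte
offset 0, while on a big-endian target they start at byte offset 4 of
the 8-byte register. */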
3315
3316 /* Helper for aarch64_pseudo_write. */
3317
3318 static void
3319 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
3320 int regnum_offset, int regsize, const gdb_byte *buf)
3321 {
3322 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
3323
3324 /* Enough space for a full vector register. */
3325 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
3326 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
3327
3328 /* Ensure the register buffer is zero. We want GDB writes of the
3329 various 'scalar' pseudo registers to behave like architectural
3330 writes: register width bytes are written, the remainder are set
3331 to zero. */
3332 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
3333
3334 memcpy (reg_buf, buf, regsize);
3335 regcache->raw_write (v_regnum, reg_buf);
3336 }
3337
3338 /* Given REGNUM, a SME pseudo-register number, store the bytes from DATA to the
3339 pseudo-register. */
3340
3341 static void
3342 aarch64_sme_pseudo_register_write (struct gdbarch *gdbarch,
3343 struct regcache *regcache,
3344 int regnum, const gdb_byte *data)
3345 {
3346 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3347
3348 gdb_assert (tdep->has_sme ());
3349 gdb_assert (tdep->sme_svq > 0);
3350 gdb_assert (tdep->sme_pseudo_base <= regnum);
3351 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count);
3352
3353 /* Fetch the offsets that we need in order to write to the correct blocks
3354 of ZA. */
3355 struct za_offsets offsets;
3356 aarch64_za_offsets_from_regnum (gdbarch, regnum, offsets);
3357
3358 /* Fetch the current contents of ZA so untouched bytes are preserved. */
3359 size_t svl = sve_vl_from_vq (tdep->sme_svq);
3360 gdb::byte_vector za (std::pow (svl, 2));
3361 regcache->raw_read (tdep->sme_za_regnum, za.data ());
3362 /* Copy the requested data. */
3363 for (int chunks = 0; chunks < offsets.chunks; chunks++)
3364 {
3365 const gdb_byte *source = data + chunks * offsets.chunk_size;
3366 gdb_byte *destination
3367 = za.data () + offsets.starting_offset + chunks * offsets.stride_size;
3368
3369 memcpy (destination, source, offsets.chunk_size);
3370 }
3371
3372 /* Write back to ZA. */
3373 regcache->raw_write (tdep->sme_za_regnum, za.data ());
3374 }
3375
3376 /* Implement the "pseudo_register_write" gdbarch method. */
3377
3378 static void
3379 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
3380 int regnum, const gdb_byte *buf)
3381 {
3382 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
3383
3384 if (is_w_pseudo_register (gdbarch, regnum))
3385 {
3386 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3387 /* Default offset for little endian. */
3388 int offset = 0;
3389
3390 if (byte_order == BFD_ENDIAN_BIG)
3391 offset = 4;
3392
3393 /* Find the correct X register to write the data to. */
3394 int x_regnum = AARCH64_X0_REGNUM + (regnum - tdep->w_pseudo_base);
3395
3396 /* First zero-out the contents of X. */
3397 ULONGEST zero = 0;
3398 regcache->raw_write (x_regnum, zero);
3399 /* Write to the bottom 4 bytes of X. */
3400 regcache->raw_write_part (x_regnum, offset, 4, buf);
3401 return;
3402 }
3403 else if (is_sme_pseudo_register (gdbarch, regnum))
3404 {
3405 aarch64_sme_pseudo_register_write (gdbarch, regcache, regnum, buf);
3406 return;
3407 }
3408
3409 regnum -= gdbarch_num_regs (gdbarch);
3410
3411 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
3412 return aarch64_pseudo_write_1 (gdbarch, regcache,
3413 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
3414 buf);
3415
3416 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
3417 return aarch64_pseudo_write_1 (gdbarch, regcache,
3418 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
3419 buf);
3420
3421 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
3422 return aarch64_pseudo_write_1 (gdbarch, regcache,
3423 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
3424 buf);
3425
3426 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
3427 return aarch64_pseudo_write_1 (gdbarch, regcache,
3428 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
3429 buf);
3430
3431 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
3432 return aarch64_pseudo_write_1 (gdbarch, regcache,
3433 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
3434 buf);
3435
3436 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
3437 && regnum < AARCH64_SVE_V0_REGNUM + 32)
3438 return aarch64_pseudo_write_1 (gdbarch, regcache,
3439 regnum - AARCH64_SVE_V0_REGNUM,
3440 V_REGISTER_SIZE, buf);
3441
3442 gdb_assert_not_reached ("regnum out of bounds");
3443 }
3444
3445 /* Callback function for user_reg_add. */
3446
3447 static struct value *
3448 value_of_aarch64_user_reg (frame_info_ptr frame, const void *baton)
3449 {
3450 const int *reg_p = (const int *) baton;
3451
3452 return value_of_register (*reg_p, frame);
3453 }
3454 \f
3455
3456 /* Implement the "software_single_step" gdbarch method, needed to
3457 single step through atomic sequences on AArch64. */
3458
3459 static std::vector<CORE_ADDR>
3460 aarch64_software_single_step (struct regcache *regcache)
3461 {
3462 struct gdbarch *gdbarch = regcache->arch ();
3463 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3464 const int insn_size = 4;
3465 const int atomic_sequence_length = 16; /* Instruction sequence length. */
3466 CORE_ADDR pc = regcache_read_pc (regcache);
3467 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
3468 CORE_ADDR loc = pc;
3469 CORE_ADDR closing_insn = 0;
3470
3471 ULONGEST insn_from_memory;
3472 if (!safe_read_memory_unsigned_integer (loc, insn_size,
3473 byte_order_for_code,
3474 &insn_from_memory))
3475 {
3476 /* Assume we don't have an atomic sequence, as we couldn't read the
3477 instruction in this location. */
3478 return {};
3479 }
3480
3481 uint32_t insn = insn_from_memory;
3482 int index;
3483 int insn_count;
3484 int bc_insn_count = 0; /* Conditional branch instruction count. */
3485 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
3486 aarch64_inst inst;
3487
3488 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3489 return {};
3490
3491 /* Look for a Load Exclusive instruction which begins the sequence. */
3492 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
3493 return {};
3494
3495 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
3496 {
3497 loc += insn_size;
3498
3499 if (!safe_read_memory_unsigned_integer (loc, insn_size,
3500 byte_order_for_code,
3501 &insn_from_memory))
3502 {
3503 /* Assume we don't have an atomic sequence, as we couldn't read the
3504 instruction in this location. */
3505 return {};
3506 }
3507
3508 insn = insn_from_memory;
3509 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3510 return {};
3511 /* Check if the instruction is a conditional branch. */
3512 if (inst.opcode->iclass == condbranch)
3513 {
3514 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
3515
3516 if (bc_insn_count >= 1)
3517 return {};
3518
3519 /* It is, so we'll try to set a breakpoint at the destination. */
3520 breaks[1] = loc + inst.operands[0].imm.value;
3521
3522 bc_insn_count++;
3523 last_breakpoint++;
3524 }
3525
3526 /* Look for the Store Exclusive which closes the atomic sequence. */
3527 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
3528 {
3529 closing_insn = loc;
3530 break;
3531 }
3532 }
3533
3534 /* We didn't find a closing Store Exclusive instruction, fall back. */
3535 if (!closing_insn)
3536 return {};
3537
3538 /* Insert breakpoint after the end of the atomic sequence. */
3539 breaks[0] = loc + insn_size;
3540
3541 /* Check for duplicated breakpoints, and also check that the second
3542 breakpoint is not within the atomic sequence. */
3543 if (last_breakpoint
3544 && (breaks[1] == breaks[0]
3545 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
3546 last_breakpoint = 0;
3547
3548 std::vector<CORE_ADDR> next_pcs;
3549
3550 /* Insert the breakpoint at the end of the sequence, and one at the
3551 destination of the conditional branch, if it exists. */
3552 for (index = 0; index <= last_breakpoint; index++)
3553 next_pcs.push_back (breaks[index]);
3554
3555 return next_pcs;
3556 }
3557
3558 struct aarch64_displaced_step_copy_insn_closure
3559 : public displaced_step_copy_insn_closure
3560 {
3561   /* True when a conditional instruction, such as B.COND, TBZ, etc.,
3562      is being displaced stepped.  */
3563 bool cond = false;
3564
3565 /* PC adjustment offset after displaced stepping. If 0, then we don't
3566 write the PC back, assuming the PC is already the right address. */
3567 int32_t pc_adjust = 0;
3568 };
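
/* A sketch of how PC_ADJUST is used: when a B instruction whose target
cannot be re-encoded from the scratch pad is relocated, the scratch pad
receives a NOP and PC_ADJUST is set to the original branch offset, so
the fixup phase below writes FROM + PC_ADJUST to the PC rather than
trusting the PC left behind by the executed copy. */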
3569
3570 /* Data when visiting instructions for displaced stepping. */
3571
3572 struct aarch64_displaced_step_data
3573 {
3574 struct aarch64_insn_data base;
3575
3576   /* The address at which the instruction will be executed.  */
3577 CORE_ADDR new_addr;
3578 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
3579 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
3580 /* Number of instructions in INSN_BUF. */
3581 unsigned insn_count;
3582 /* Registers when doing displaced stepping. */
3583 struct regcache *regs;
3584
3585 aarch64_displaced_step_copy_insn_closure *dsc;
3586 };
3587
3588 /* Implementation of aarch64_insn_visitor method "b". */
3589
3590 static void
3591 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
3592 struct aarch64_insn_data *data)
3593 {
3594 struct aarch64_displaced_step_data *dsd
3595 = (struct aarch64_displaced_step_data *) data;
3596 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
3597
3598 if (can_encode_int32 (new_offset, 28))
3599 {
3600       /* Emit B rather than BL, because executing BL from the scratch pad
3601 	 would put the wrong address into LR.  Instead, we emit B and
3602 	 update LR manually if the instruction is BL.  */
3603 emit_b (dsd->insn_buf, 0, new_offset);
3604 dsd->insn_count++;
3605 }
3606 else
3607 {
3608 /* Write NOP. */
3609 emit_nop (dsd->insn_buf);
3610 dsd->insn_count++;
3611 dsd->dsc->pc_adjust = offset;
3612 }
3613
3614 if (is_bl)
3615 {
3616 /* Update LR. */
3617 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3618 data->insn_addr + 4);
3619 }
3620 }
3621
3622 /* Implementation of aarch64_insn_visitor method "b_cond". */
3623
3624 static void
3625 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
3626 struct aarch64_insn_data *data)
3627 {
3628 struct aarch64_displaced_step_data *dsd
3629 = (struct aarch64_displaced_step_data *) data;
3630
3631   /* GDB has to fix up the PC after displaced stepping this instruction
3632      differently, according to whether the condition is true or false.
3633      Instead of checking COND against the condition flags, we can emit
3634      the following instructions, and then GDB can tell how to fix up the
3635      PC from the PC value alone.
3636
3637 B.COND TAKEN ; If cond is true, then jump to TAKEN.
3638 INSN1 ;
3639 TAKEN:
3640 INSN2
3641 */
3642
3643 emit_bcond (dsd->insn_buf, cond, 8);
3644 dsd->dsc->cond = true;
3645 dsd->dsc->pc_adjust = offset;
3646 dsd->insn_count = 1;
3647 }
3648
3649 /* Build an aarch64_register operand with number NUM and size flag
3650    IS64.  If a register is known statically, it should be made a
3651    global instead of being constructed through this helper.  */
3652
3653 static struct aarch64_register
3654 aarch64_register (unsigned num, int is64)
3655 {
3656 return (struct aarch64_register) { num, is64 };
3657 }
3658
3659 /* Implementation of aarch64_insn_visitor method "cb". */
3660
3661 static void
3662 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3663 const unsigned rn, int is64,
3664 struct aarch64_insn_data *data)
3665 {
3666 struct aarch64_displaced_step_data *dsd
3667 = (struct aarch64_displaced_step_data *) data;
3668
3669 /* The offset is out of range for a compare and branch
3670 instruction. We can use the following instructions instead:
3671
3672 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
3673 INSN1 ;
3674 TAKEN:
3675 INSN2
3676 */
3677 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3678 dsd->insn_count = 1;
3679 dsd->dsc->cond = true;
3680 dsd->dsc->pc_adjust = offset;
3681 }
3682
3683 /* Implementation of aarch64_insn_visitor method "tb". */
3684
3685 static void
3686 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3687 const unsigned rt, unsigned bit,
3688 struct aarch64_insn_data *data)
3689 {
3690 struct aarch64_displaced_step_data *dsd
3691 = (struct aarch64_displaced_step_data *) data;
3692
3693   /* The offset is out of range for a test bit and branch
3694      instruction.  We can use the following instructions instead:
3695
3696 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3697 INSN1 ;
3698 TAKEN:
3699 INSN2
3700
3701 */
3702 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3703 dsd->insn_count = 1;
3704 dsd->dsc->cond = true;
3705 dsd->dsc->pc_adjust = offset;
3706 }
3707
3708 /* Implementation of aarch64_insn_visitor method "adr". */
3709
3710 static void
3711 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3712 const int is_adrp, struct aarch64_insn_data *data)
3713 {
3714 struct aarch64_displaced_step_data *dsd
3715 = (struct aarch64_displaced_step_data *) data;
3716 /* We know exactly the address the ADR{P,} instruction will compute.
3717 We can just write it to the destination register. */
3718 CORE_ADDR address = data->insn_addr + offset;
3719
3720 if (is_adrp)
3721 {
3722 /* Clear the lower 12 bits of the offset to get the 4K page. */
3723 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3724 address & ~0xfff);
3725 }
3726 else
3727 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3728 address);
3729
3730 dsd->dsc->pc_adjust = 4;
3731 emit_nop (dsd->insn_buf);
3732 dsd->insn_count = 1;
3733 }
3734
3735 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
3736
3737 static void
3738 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3739 const unsigned rt, const int is64,
3740 struct aarch64_insn_data *data)
3741 {
3742 struct aarch64_displaced_step_data *dsd
3743 = (struct aarch64_displaced_step_data *) data;
3744 CORE_ADDR address = data->insn_addr + offset;
3745 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3746
3747 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3748 address);
3749
3750 if (is_sw)
3751 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3752 aarch64_register (rt, 1), zero);
3753 else
3754 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3755 aarch64_register (rt, 1), zero);
3756
3757 dsd->dsc->pc_adjust = 4;
3758 }
3759
3760 /* Implementation of aarch64_insn_visitor method "others". */
3761
3762 static void
3763 aarch64_displaced_step_others (const uint32_t insn,
3764 struct aarch64_insn_data *data)
3765 {
3766 struct aarch64_displaced_step_data *dsd
3767 = (struct aarch64_displaced_step_data *) data;
3768
3769 uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3770 if (masked_insn == BLR)
3771 {
3772 /* Emit a BR to the same register and then update LR to the original
3773 address (similar to aarch64_displaced_step_b). */
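/* E.g. (sketch): BLR x0 is 0xd63f0000; masking with 0xffdfffff clears
bit 21 and yields BR x0, 0xd61f0000. */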
3774 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3775 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3776 data->insn_addr + 4);
3777 }
3778 else
3779 aarch64_emit_insn (dsd->insn_buf, insn);
3780 dsd->insn_count = 1;
3781
3782 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3783 dsd->dsc->pc_adjust = 0;
3784 else
3785 dsd->dsc->pc_adjust = 4;
3786 }
3787
3788 static const struct aarch64_insn_visitor visitor =
3789 {
3790 aarch64_displaced_step_b,
3791 aarch64_displaced_step_b_cond,
3792 aarch64_displaced_step_cb,
3793 aarch64_displaced_step_tb,
3794 aarch64_displaced_step_adr,
3795 aarch64_displaced_step_ldr_literal,
3796 aarch64_displaced_step_others,
3797 };
3798
3799 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3800
3801 displaced_step_copy_insn_closure_up
3802 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3803 CORE_ADDR from, CORE_ADDR to,
3804 struct regcache *regs)
3805 {
3806 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3807 struct aarch64_displaced_step_data dsd;
3808 aarch64_inst inst;
3809 ULONGEST insn_from_memory;
3810
3811 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code,
3812 &insn_from_memory))
3813 return nullptr;
3814
3815 uint32_t insn = insn_from_memory;
3816
3817 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3818     return nullptr;
3819
3820 /* Look for a Load Exclusive instruction which begins the sequence. */
3821 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3822 {
3823       /* We can't displaced-step atomic sequences.  */
3824       return nullptr;
3825 }
3826
3827 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3828 (new aarch64_displaced_step_copy_insn_closure);
3829 dsd.base.insn_addr = from;
3830 dsd.new_addr = to;
3831 dsd.regs = regs;
3832 dsd.dsc = dsc.get ();
3833 dsd.insn_count = 0;
3834 aarch64_relocate_instruction (insn, &visitor,
3835 (struct aarch64_insn_data *) &dsd);
3836 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3837
3838 if (dsd.insn_count != 0)
3839 {
3840 int i;
3841
3842 /* Instruction can be relocated to scratch pad. Copy
3843 relocated instruction(s) there. */
3844 for (i = 0; i < dsd.insn_count; i++)
3845 {
3846 displaced_debug_printf ("writing insn %.8x at %s",
3847 dsd.insn_buf[i],
3848 paddress (gdbarch, to + i * 4));
3849
3850 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3851 (ULONGEST) dsd.insn_buf[i]);
3852 }
3853 }
3854 else
3855 {
3856 dsc = NULL;
3857 }
3858
3859   /* This is a workaround for a problem with g++ 4.8.  */
3860 return displaced_step_copy_insn_closure_up (dsc.release ());
3861 }
3862
3863 /* Implement the "displaced_step_fixup" gdbarch method. */
3864
3865 void
3866 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3867 struct displaced_step_copy_insn_closure *dsc_,
3868 CORE_ADDR from, CORE_ADDR to,
3869 struct regcache *regs, bool completed_p)
3870 {
3871 CORE_ADDR pc = regcache_read_pc (regs);
3872
3873 /* If the displaced instruction didn't complete successfully then all we
3874 need to do is restore the program counter. */
3875 if (!completed_p)
3876 {
3877 pc = from + (pc - to);
3878 regcache_write_pc (regs, pc);
3879 return;
3880 }
3881
3882 aarch64_displaced_step_copy_insn_closure *dsc
3883 = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3884
3885 displaced_debug_printf ("PC after stepping: %s (was %s).",
3886 paddress (gdbarch, pc), paddress (gdbarch, to));
3887
3888 if (dsc->cond)
3889 {
3890 displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3891 dsc->pc_adjust);
3892
3893 if (pc - to == 8)
3894 {
3895 /* Condition is true. */
3896 }
3897 else if (pc - to == 4)
3898 {
3899 /* Condition is false. */
3900 dsc->pc_adjust = 4;
3901 }
3902 else
3903 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3904
3905 displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3906 dsc->pc_adjust);
3907 }
3908
3909 displaced_debug_printf ("%s PC by %d",
3910 dsc->pc_adjust ? "adjusting" : "not adjusting",
3911 dsc->pc_adjust);
3912
3913 if (dsc->pc_adjust != 0)
3914 {
3915 /* Make sure the previous instruction was executed (that is, the PC
3916 has changed). If the PC didn't change, then discard the adjustment
3917 offset. Otherwise we may skip an instruction before its execution
3918 took place. */
3919 if ((pc - to) == 0)
3920 {
3921 displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3922 dsc->pc_adjust = 0;
3923 }
3924
3925 displaced_debug_printf ("fixup: set PC to %s:%d",
3926 paddress (gdbarch, from), dsc->pc_adjust);
3927
3928 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3929 from + dsc->pc_adjust);
3930 }
3931 }
3932
3933 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3934
3935 bool
3936 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3937 {
3938 return true;
3939 }
3940
3941 /* Get the correct target description for the given FEATURES.
3942    If the VQ field is zero then it is assumed SVE is not supported.
3943    (It is not possible to set VQ to zero on an SVE system).
3944 
3945    The MTE field indicates the presence of the Memory Tagging Extension.
3946 
3947    The TLS field gives the number of Thread Local Storage registers.  */
3948
3949 const target_desc *
3950 aarch64_read_description (const aarch64_features &features)
3951 {
3952 if (features.vq > AARCH64_MAX_SVE_VQ)
3953 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq,
3954 AARCH64_MAX_SVE_VQ);
3955
3956 struct target_desc *tdesc = tdesc_aarch64_map[features];
3957
3958 if (tdesc == NULL)
3959 {
3960 tdesc = aarch64_create_target_description (features);
3961 tdesc_aarch64_map[features] = tdesc;
3962 }
3963
3964 return tdesc;
3965 }
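
/* A minimal usage sketch, mirroring aarch64_gdbarch_init below: to
fetch a description for an SVE target with VQ 2, one would do

aarch64_features features;
features.vq = 2;
const target_desc *tdesc = aarch64_read_description (features);

The result is cached in tdesc_aarch64_map, so repeated calls with equal
features return the same descriptor. */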
3966
3967 /* Return the VQ used when creating the target description TDESC. */
3968
3969 static uint64_t
3970 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3971 {
3972 const struct tdesc_feature *feature_sve;
3973
3974 if (!tdesc_has_registers (tdesc))
3975 return 0;
3976
3977 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3978
3979 if (feature_sve == nullptr)
3980 return 0;
3981
3982 uint64_t vl = tdesc_register_bitsize (feature_sve,
3983 aarch64_sve_register_names[0]) / 8;
3984 return sve_vq_from_vl (vl);
3985 }
3986
3987
3988 /* Return the svq (streaming vector quotient) used when creating the target
3989 description TDESC. */
3990
3991 static uint64_t
3992 aarch64_get_tdesc_svq (const struct target_desc *tdesc)
3993 {
3994 const struct tdesc_feature *feature_sme;
3995
3996 if (!tdesc_has_registers (tdesc))
3997 return 0;
3998
3999 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4000
4001 if (feature_sme == nullptr)
4002 return 0;
4003
4004 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za");
4005
4006 /* We have the total size of the ZA matrix, in bits. Figure out the svl
4007 value. */
4008 size_t svl = std::sqrt (svl_squared / 8);
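/* Worked example (assuming a 256-bit streaming vector length): "za" is
reported as 8192 bits, i.e. 1024 bytes = 32 x 32 bytes, so svl is 32
bytes and sve_vq_from_vl below yields 32 / 16 = 2. */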
4009
4010 /* Now extract svq. */
4011 return sve_vq_from_vl (svl);
4012 }
4013
4014 /* Get the AArch64 features present in the given target description. */
4015
4016 aarch64_features
4017 aarch64_features_from_target_desc (const struct target_desc *tdesc)
4018 {
4019 aarch64_features features;
4020
4021 if (tdesc == nullptr)
4022 return features;
4023
4024 features.vq = aarch64_get_tdesc_vq (tdesc);
4025
4026   /* We need to look for a couple of pauth feature name variations.  */
4027 features.pauth
4028 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr);
4029
4030 if (!features.pauth)
4031 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2")
4032 != nullptr);
4033
4034 features.mte
4035 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr);
4036
4037 const struct tdesc_feature *tls_feature
4038 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4039
4040 if (tls_feature != nullptr)
4041 {
4042 /* We have TLS registers. Find out how many. */
4043 if (tdesc_unnumbered_register (tls_feature, "tpidr2"))
4044 features.tls = 2;
4045 else
4046 features.tls = 1;
4047 }
4048
4049 features.svq = aarch64_get_tdesc_svq (tdesc);
4050
4051 /* Check for the SME2 feature. */
4052 features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2")
4053 != nullptr);
4054
4055 return features;
4056 }
4057
4058 /* Implement the "cannot_store_register" gdbarch method. */
4059
4060 static int
4061 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
4062 {
4063 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4064
4065 if (!tdep->has_pauth ())
4066 return 0;
4067
4068 /* Pointer authentication registers are read-only. */
4069 return (regnum >= tdep->pauth_reg_base
4070 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count);
4071 }
4072
4073 /* Implement the stack_frame_destroyed_p gdbarch method. */
4074
4075 static int
4076 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
4077 {
4078 CORE_ADDR func_start, func_end;
4079 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
4080 return 0;
4081
4082 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4083
4084 ULONGEST insn_from_memory;
4085 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code,
4086 &insn_from_memory))
4087 return 0;
4088
4089 uint32_t insn = insn_from_memory;
4090
4091 aarch64_inst inst;
4092 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
4093 return 0;
4094
4095 return streq (inst.opcode->name, "ret");
4096 }
4097
4098 /* AArch64 implementation of the remove_non_address_bits gdbarch hook.
4099    Remove non-address bits from a pointer value.  */
4100
4101 static CORE_ADDR
4102 aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer)
4103 {
4104 /* By default, we assume TBI and discard the top 8 bits plus the VA range
4105 select bit (55). Below we try to fetch information about pointer
4106 authentication masks in order to make non-address removal more
4107 precise. */
4108 CORE_ADDR mask = AARCH64_TOP_BITS_MASK;
4109
4110 /* Check if we have an inferior first. If not, just use the default
4111 mask.
4112
4113 We use the inferior_ptid here because the pointer authentication masks
4114 should be the same across threads of a process. Since we may not have
4115 access to the current thread (gdb may have switched to no inferiors
4116 momentarily), we use the inferior ptid. */
4117 if (inferior_ptid != null_ptid)
4118 {
4119 /* If we do have an inferior, attempt to fetch its thread's thread_info
4120 struct. */
4121 thread_info *thread = current_inferior ()->find_thread (inferior_ptid);
4122
4123 /* If the thread is running, we will not be able to fetch the mask
4124 registers. */
4125 if (thread != nullptr && thread->state != THREAD_RUNNING)
4126 {
4127 /* Otherwise, fetch the register cache and the masks. */
4128 struct regcache *regs
4129 = get_thread_regcache (current_inferior ()->process_target (),
4130 inferior_ptid);
4131
4132 /* Use the gdbarch from the register cache to check for pointer
4133 authentication support, as it matches the features found in
4134 that particular thread. */
4135 aarch64_gdbarch_tdep *tdep
4136 = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ());
4137
4138 /* Is there pointer authentication support? */
4139 if (tdep->has_pauth ())
4140 {
4141 CORE_ADDR cmask, dmask;
4142 int dmask_regnum
4143 = AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base);
4144 int cmask_regnum
4145 = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
4146
4147 /* If we have a kernel address and we have kernel-mode address
4148 mask registers, use those instead. */
4149 if (tdep->pauth_reg_count > 2
4150 && pointer & VA_RANGE_SELECT_BIT_MASK)
4151 {
4152 dmask_regnum
4153 = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4154 cmask_regnum
4155 = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base);
4156 }
4157
4158 /* We have both a code mask and a data mask. For now they are
4159 the same, but this may change in the future. */
4160 if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID)
4161 dmask = mask;
4162
4163 if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID)
4164 cmask = mask;
4165
4166 mask |= aarch64_mask_from_pac_registers (cmask, dmask);
4167 }
4168 }
4169 }
4170
4171 return aarch64_remove_top_bits (pointer, mask);
4172 }
4173
4174 /* Given NAMES, a vector of strings, initialize it with all the SME
4175 pseudo-register names for the current streaming vector length. */
4176
4177 static void
4178 aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch,
4179 std::vector<std::string> &names)
4180 {
4181 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4182
4183 gdb_assert (tdep->has_sme ());
4184 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0);
4185 gdb_assert (tdep->sme_tile_pseudo_base > 0);
4186
4187 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++)
4188 {
4189 int regnum = tdep->sme_tile_slice_pseudo_base + i;
4190 struct za_pseudo_encoding encoding;
4191 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4192 names.push_back (aarch64_za_tile_slice_name (encoding));
4193 }
4194 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++)
4195 {
4196 int regnum = tdep->sme_tile_pseudo_base + i;
4197 struct za_pseudo_encoding encoding;
4198 aarch64_za_decode_pseudos (gdbarch, regnum, encoding);
4199 names.push_back (aarch64_za_tile_name (encoding));
4200 }
4201 }
4202
4203 /* Initialize the current architecture based on INFO. If possible,
4204 re-use an architecture from ARCHES, which is a list of
4205 architectures already created during this debugging session.
4206
4207 Called e.g. at program startup, when reading a core file, and when
4208 reading a binary file. */
4209
4210 static struct gdbarch *
4211 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
4212 {
4213 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
4214 const struct tdesc_feature *feature_pauth;
4215 bool valid_p = true;
4216 int i, num_regs = 0, num_pseudo_regs = 0;
4217 int first_pauth_regnum = -1, ra_sign_state_offset = -1;
4218 int first_mte_regnum = -1, first_tls_regnum = -1;
4219 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc);
4220 uint64_t svq = aarch64_get_tdesc_svq (info.target_desc);
4221
4222 if (vq > AARCH64_MAX_SVE_VQ)
4223 internal_error (_("VQ out of bounds: %s (max %d)"),
4224 pulongest (vq), AARCH64_MAX_SVE_VQ);
4225
4226 if (svq > AARCH64_MAX_SVE_VQ)
4227 internal_error (_("Streaming vector quotient (svq) out of bounds: %s"
4228 " (max %d)"),
4229 pulongest (svq), AARCH64_MAX_SVE_VQ);
4230
4231 /* If there is already a candidate, use it. */
4232 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
4233 best_arch != nullptr;
4234 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
4235 {
4236 aarch64_gdbarch_tdep *tdep
4237 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch);
4238 if (tdep && tdep->vq == vq && tdep->sme_svq == svq)
4239 return best_arch->gdbarch;
4240 }
4241
4242 /* Ensure we always have a target descriptor, and that it is for the given VQ
4243 value. */
4244 const struct target_desc *tdesc = info.target_desc;
4245 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)
4246 || svq != aarch64_get_tdesc_svq (tdesc))
4247 {
4248 aarch64_features features;
4249 features.vq = vq;
4250 features.svq = svq;
4251 tdesc = aarch64_read_description (features);
4252 }
4253 gdb_assert (tdesc);
4254
4255   feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
4256 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
4257 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
4258 const struct tdesc_feature *feature_mte
4259 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
4260 const struct tdesc_feature *feature_tls
4261 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls");
4262
4263 if (feature_core == nullptr)
4264 return nullptr;
4265
4266 tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
4267
4268 /* Validate the description provides the mandatory core R registers
4269 and allocate their numbers. */
4270 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
4271 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
4272 AARCH64_X0_REGNUM + i,
4273 aarch64_r_register_names[i]);
4274
4275 num_regs = AARCH64_X0_REGNUM + i;
4276
4277 /* Add the V registers. */
4278 if (feature_fpu != nullptr)
4279 {
4280 if (feature_sve != nullptr)
4281 error (_("Program contains both fpu and SVE features."));
4282
4283 /* Validate the description provides the mandatory V registers
4284 and allocate their numbers. */
4285 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
4286 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
4287 AARCH64_V0_REGNUM + i,
4288 aarch64_v_register_names[i]);
4289
4290 num_regs = AARCH64_V0_REGNUM + i;
4291 }
4292
4293 /* Add the SVE registers. */
4294 if (feature_sve != nullptr)
4295 {
4296 /* Validate the description provides the mandatory SVE registers
4297 and allocate their numbers. */
4298 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
4299 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
4300 AARCH64_SVE_Z0_REGNUM + i,
4301 aarch64_sve_register_names[i]);
4302
4303 num_regs = AARCH64_SVE_Z0_REGNUM + i;
4304       num_pseudo_regs += 32;	/* Add the Vn register pseudos.  */
4305 }
4306
4307 if (feature_fpu != nullptr || feature_sve != nullptr)
4308 {
4309       num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
4310       num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
4311       num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
4312       num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
4313       num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
4314 }
4315
4316 int first_sme_regnum = -1;
4317 int first_sme2_regnum = -1;
4318 int first_sme_pseudo_regnum = -1;
4319 const struct tdesc_feature *feature_sme
4320 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme");
4321 if (feature_sme != nullptr)
4322 {
4323 /* Record the first SME register. */
4324 first_sme_regnum = num_regs;
4325
4326 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4327 num_regs++, "svg");
4328
4329 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4330 num_regs++, "svcr");
4331
4332 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (),
4333 num_regs++, "za");
4334
4335 /* Record the first SME pseudo register. */
4336 first_sme_pseudo_regnum = num_pseudo_regs;
4337
4338 /* Add the ZA tile slice pseudo registers. The number of tile slice
4339 	 pseudo-registers depends on the svl, and is always a multiple of 5.  */
4340 num_pseudo_regs += (svq << 5) * 5;
4341
4342 /* Add the ZA tile pseudo registers. */
4343 num_pseudo_regs += AARCH64_ZA_TILES_NUM;
4344
4345 /* Now check for the SME2 feature. SME2 is only available if SME is
4346 available. */
4347 const struct tdesc_feature *feature_sme2
4348 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2");
4349 if (feature_sme2 != nullptr)
4350 {
4351 /* Record the first SME2 register. */
4352 first_sme2_regnum = num_regs;
4353
4354 valid_p &= tdesc_numbered_register (feature_sme2, tdesc_data.get (),
4355 num_regs++, "zt0");
4356 }
4357 }
4358
4359 /* Add the TLS register. */
4360 int tls_register_count = 0;
4361 if (feature_tls != nullptr)
4362 {
4363 first_tls_regnum = num_regs;
4364
4365 /* Look for the TLS registers. tpidr is required, but tpidr2 is
4366 optional. */
4367 valid_p
4368 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4369 first_tls_regnum, "tpidr");
4370
4371 if (valid_p)
4372 {
4373 tls_register_count++;
4374
4375 bool has_tpidr2
4376 = tdesc_numbered_register (feature_tls, tdesc_data.get (),
4377 first_tls_regnum + tls_register_count,
4378 "tpidr2");
4379
4380 /* Figure out how many TLS registers we have. */
4381 if (has_tpidr2)
4382 tls_register_count++;
4383
4384 num_regs += tls_register_count;
4385 }
4386 else
4387 {
4388 warning (_("Provided TLS register feature doesn't contain "
4389 "required tpidr register."));
4390 return nullptr;
4391 }
4392 }
4393
4394 /* We have two versions of the pauth target description due to a past bug
4395 where GDB would crash when seeing the first version of the pauth target
4396 description. */
4397 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
4398 if (feature_pauth == nullptr)
4399 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2");
4400
4401 /* Add the pauth registers. */
4402 int pauth_masks = 0;
4403 if (feature_pauth != NULL)
4404 {
4405 first_pauth_regnum = num_regs;
4406 ra_sign_state_offset = num_pseudo_regs;
4407
4408 /* Size of the expected register set with all 4 masks. */
4409 int set_size = ARRAY_SIZE (aarch64_pauth_register_names);
4410
4411       /* QEMU exposes a couple of additional masks for the high half of
4412 	 the address, so we should have either 2 or 4 registers.  */
4413 if (tdesc_unnumbered_register (feature_pauth,
4414 "pauth_dmask_high") == 0)
4415 {
4416 	  /* We did not find pauth_dmask_high, so assume we only have
4417 	     2 masks and are not dealing with QEMU or other emulators.  */
4418 set_size -= 2;
4419 }
4420
4421 /* Validate the descriptor provides the mandatory PAUTH registers and
4422 allocate their numbers. */
4423 for (i = 0; i < set_size; i++)
4424 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
4425 first_pauth_regnum + i,
4426 aarch64_pauth_register_names[i]);
4427
4428 num_regs += i;
4429 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
4430 pauth_masks = set_size;
4431 }
4432
4433 /* Add the MTE registers. */
4434 if (feature_mte != NULL)
4435 {
4436 first_mte_regnum = num_regs;
4437 /* Validate the descriptor provides the mandatory MTE registers and
4438 allocate their numbers. */
4439 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
4440 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
4441 first_mte_regnum + i,
4442 aarch64_mte_register_names[i]);
4443
4444 num_regs += i;
4445 }
4446   /* W pseudo-registers.  */
4447 int first_w_regnum = num_pseudo_regs;
4448 num_pseudo_regs += 31;
4449
4450 if (!valid_p)
4451 return nullptr;
4452
4453 /* AArch64 code is always little-endian. */
4454 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
4455
4456 gdbarch *gdbarch
4457 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep));
4458 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4459
4460 /* This should be low enough for everything. */
4461 tdep->lowest_pc = 0x20;
4462 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
4463 tdep->jb_elt_size = 8;
4464 tdep->vq = vq;
4465 tdep->pauth_reg_base = first_pauth_regnum;
4466 tdep->pauth_reg_count = pauth_masks;
4467 tdep->ra_sign_state_regnum = -1;
4468 tdep->mte_reg_base = first_mte_regnum;
4469 tdep->tls_regnum_base = first_tls_regnum;
4470 tdep->tls_register_count = tls_register_count;
4471
4472 /* Set the SME register set details. The pseudo-registers will be adjusted
4473 later. */
4474 tdep->sme_reg_base = first_sme_regnum;
4475 tdep->sme_svg_regnum = first_sme_regnum;
4476 tdep->sme_svcr_regnum = first_sme_regnum + 1;
4477 tdep->sme_za_regnum = first_sme_regnum + 2;
4478 tdep->sme_svq = svq;
4479
4480 /* Set the SME2 register set details. */
4481 tdep->sme2_zt0_regnum = first_sme2_regnum;
4482
4483 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
4484 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
4485
4486 /* Advance PC across function entry code. */
4487 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
4488
4489 /* The stack grows downward. */
4490 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4491
4492 /* Breakpoint manipulation. */
4493 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
4494 aarch64_breakpoint::kind_from_pc);
4495 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
4496 aarch64_breakpoint::bp_from_kind);
4497 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
4498 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
4499
4500 /* Information about registers, etc. */
4501 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
4502 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
4503 set_gdbarch_num_regs (gdbarch, num_regs);
4504
4505 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
4506 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
4507 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
4508 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
4509 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
4510 set_tdesc_pseudo_register_reggroup_p (gdbarch,
4511 aarch64_pseudo_register_reggroup_p);
4512 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
4513
4514 /* ABI */
4515 set_gdbarch_short_bit (gdbarch, 16);
4516 set_gdbarch_int_bit (gdbarch, 32);
4517 set_gdbarch_float_bit (gdbarch, 32);
4518 set_gdbarch_double_bit (gdbarch, 64);
4519 set_gdbarch_long_double_bit (gdbarch, 128);
4520 set_gdbarch_long_bit (gdbarch, 64);
4521 set_gdbarch_long_long_bit (gdbarch, 64);
4522 set_gdbarch_ptr_bit (gdbarch, 64);
4523 set_gdbarch_char_signed (gdbarch, 0);
4524 set_gdbarch_wchar_signed (gdbarch, 0);
4525 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
4526 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
4527 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
4528 set_gdbarch_type_align (gdbarch, aarch64_type_align);
4529
4530 /* Detect whether PC is at a point where the stack has been destroyed. */
4531 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);
4532
4533 /* Internal <-> external register number maps. */
4534 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
4535
4536 /* Returning results. */
4537 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value);
4538
4539 /* Disassembly. */
4540 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
4541
4542 /* Virtual tables. */
4543 set_gdbarch_vbit_in_delta (gdbarch, 1);
4544
4545 /* Hook in the ABI-specific overrides, if they have been registered. */
4546 info.target_desc = tdesc;
4547 info.tdesc_data = tdesc_data.get ();
4548 gdbarch_init_osabi (info, gdbarch);
4549
4550 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
4551 /* Register DWARF CFA vendor handler. */
4552 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
4553 aarch64_execute_dwarf_cfa_vendor_op);
4554
4555 /* Permanent/Program breakpoint handling. */
4556 set_gdbarch_program_breakpoint_here_p (gdbarch,
4557 aarch64_program_breakpoint_here_p);
4558
4559 /* Add some default predicates. */
4560 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
4561 dwarf2_append_unwinders (gdbarch);
4562 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
4563
4564 frame_base_set_default (gdbarch, &aarch64_normal_base);
4565
4566 /* Now we have tuned the configuration, set a few final things,
4567 based on what the OS ABI has told us. */
4568
4569 if (tdep->jb_pc >= 0)
4570 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
4571
4572 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
4573
4574 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
4575
4576 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
4577
4578 /* Fetch the updated number of registers after we're done adding all
4579 entries from features we don't explicitly care about. This is the case
4580 for bare metal debugging stubs that include a lot of system registers. */
4581 num_regs = gdbarch_num_regs (gdbarch);
4582
4583 /* With the number of real registers updated, setup the pseudo-registers and
4584 record their numbers. */
4585
4586 /* Setup W pseudo-register numbers. */
4587 tdep->w_pseudo_base = first_w_regnum + num_regs;
4588 tdep->w_pseudo_count = 31;
4589
4590 /* Pointer authentication pseudo-registers. */
4591 if (tdep->has_pauth ())
4592 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs;
4593
4594 /* Architecture hook to remove bits of a pointer that are not part of the
4595 address, like memory tags (MTE) and pointer authentication signatures. */
4596 set_gdbarch_remove_non_address_bits (gdbarch,
4597 aarch64_remove_non_address_bits);
4598
4599 /* SME pseudo-registers. */
4600 if (tdep->has_sme ())
4601 {
4602 tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum;
4603 tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base;
4604 tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5;
4605 tdep->sme_tile_pseudo_base
4606 = tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count;
4607 tdep->sme_pseudo_count
4608 = tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM;
4609
4610 /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers
4611 depending on the value of svl.
4612
4613 The tile pseudo-registers are organized around their qualifiers
4614 (b, h, s, d and q). Their numbers are distributed as follows:
4615
4616 b 0
4617 h 1~2
4618 s 3~6
4619 d 7~14
4620 q 15~30
4621
4622 The naming of the tile pseudo-registers follows the pattern za<t><q>,
4623 where:
4624
4625 <t> is the tile number, with the following possible values based on
4626 the qualifiers:
4627
4628 Qualifier - Allocated indexes
4629
4630 b - 0
4631 h - 0~1
4632 s - 0~3
4633 d - 0~7
4634 q - 0~15
4635
4636 <q> is the qualifier: b, h, s, d and q.
4637
4638 The tile slice pseudo-registers are organized around their
4639 qualifiers as well (b, h, s, d and q), but also around their
4640 direction (h - horizontal and v - vertical).
4641
4642 Even-numbered tile slice pseudo-registers are horizontally-oriented
4643 and odd-numbered tile slice pseudo-registers are vertically-oriented.
4644
4645 Their numbers are distributed as follows:
4646
4647 Qualifier - Allocated indexes
4648
4649 b tile slices - 0~511
4650 h tile slices - 512~1023
4651 s tile slices - 1024~1535
4652 d tile slices - 1536~2047
4653 q tile slices - 2048~2559
4654
4655 The naming of the tile slice pseudo-registers follows the pattern
4656 za<t><d><q><s>, where:
4657
4658 <t> is the tile number as described for the tile pseudo-registers.
4659 <d> is the direction of the tile slice (h or v)
4660 <q> is the qualifier of the tile slice (b, h, s, d or q)
4661 <s> is the slice number, defined as follows:
4662
4663 Qualifier - Allocated indexes
4664
4665 b - 0~15
4666 h - 0~7
4667 s - 0~3
4668 d - 0~1
4669 q - 0
4670
4671 We have helper functions to translate to/from register index from/to
4672 the set of fields that make the pseudo-register names. */
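
/* For example, under the scheme above "za1s" names the s-qualified
tile 1, while "za0hb5" names horizontal slice 5 of the b-qualified
tile 0. */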
4673
4674 /* Build the array of pseudo-register names available for this
4675 particular gdbarch configuration. */
4676 aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names);
4677 }
4678
4679 /* Add standard register aliases. */
4680 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
4681 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
4682 value_of_aarch64_user_reg,
4683 &aarch64_register_aliases[i].regnum);
4684
4685 register_aarch64_ravenscar_ops (gdbarch);
4686
4687 return gdbarch;
4688 }
4689
4690 static void
4691 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
4692 {
4693 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch);
4694
4695 if (tdep == NULL)
4696 return;
4697
4698 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"),
4699 paddress (gdbarch, tdep->lowest_pc));
4700
4701 /* SME fields. */
4702 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"),
4703 host_address_to_string (tdep->sme_tile_type_q));
4704 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"),
4705 host_address_to_string (tdep->sme_tile_type_d));
4706 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"),
4707 host_address_to_string (tdep->sme_tile_type_s));
4708 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"),
4709 host_address_to_string (tdep->sme_tile_type_h));
4710   gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_b = %s\n"),
4711 	      host_address_to_string (tdep->sme_tile_type_b));
4712 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"),
4713 host_address_to_string (tdep->sme_tile_slice_type_q));
4714 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"),
4715 host_address_to_string (tdep->sme_tile_slice_type_d));
4716 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"),
4717 host_address_to_string (tdep->sme_tile_slice_type_s));
4718 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"),
4719 host_address_to_string (tdep->sme_tile_slice_type_h));
4720 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"),
4721 host_address_to_string (tdep->sme_tile_slice_type_b));
4722 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"),
4723 pulongest (tdep->sme_reg_base));
4724 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"),
4725 pulongest (tdep->sme_svg_regnum));
4726 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"),
4727 pulongest (tdep->sme_svcr_regnum));
4728 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"),
4729 pulongest (tdep->sme_za_regnum));
4730 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"),
4731 pulongest (tdep->sme_pseudo_base));
4732 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"),
4733 pulongest (tdep->sme_pseudo_count));
4734 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"),
4735 pulongest (tdep->sme_tile_slice_pseudo_base));
4736 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"),
4737 pulongest (tdep->sme_tile_slice_pseudo_count));
4738 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"),
4739 pulongest (tdep->sme_tile_pseudo_base));
4740 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"),
4741 pulongest (tdep->sme_svq));
4742 }
4743
4744 #if GDB_SELF_TEST
4745 namespace selftests
4746 {
4747 static void aarch64_process_record_test (void);
4748 }
4749 #endif
4750
4751 void _initialize_aarch64_tdep ();
4752 void
4753 _initialize_aarch64_tdep ()
4754 {
4755 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
4756 aarch64_dump_tdep);
4757
4758 /* Debug this file's internals. */
4759 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
4760 Set AArch64 debugging."), _("\
4761 Show AArch64 debugging."), _("\
4762 When on, AArch64 specific debugging is enabled."),
4763 NULL,
4764 show_aarch64_debug,
4765 &setdebuglist, &showdebuglist);
4766
4767 #if GDB_SELF_TEST
4768 selftests::register_test ("aarch64-analyze-prologue",
4769 selftests::aarch64_analyze_prologue_test);
4770 selftests::register_test ("aarch64-process-record",
4771 selftests::aarch64_process_record_test);
4772 #endif
4773 }
4774
4775 /* AArch64 process record-replay related structures, defines etc. */
4776
4777 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
4778 do \
4779 { \
4780 unsigned int reg_len = LENGTH; \
4781 if (reg_len) \
4782 { \
4783 REGS = XNEWVEC (uint32_t, reg_len); \
4784 	memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
4785 } \
4786 } \
4787 while (0)
4788
4789 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
4790 do \
4791 { \
4792 unsigned int mem_len = LENGTH; \
4793 if (mem_len) \
4794 { \
4795 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
4796 	memcpy (MEMS, &RECORD_BUF[0], \
4797 		sizeof (struct aarch64_mem_r) * LENGTH); \
4798 } \
4799 } \
4800 while (0)
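
/* A minimal usage sketch, mirroring the record handlers below: collect
the registers an instruction writes into a local buffer, then hand them
to the allocator.

uint32_t record_buf[2];
record_buf[0] = reg_rd;
record_buf[1] = AARCH64_CPSR_REGNUM;
r->reg_rec_count = 2;
REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf);

Here R is the aarch64_insn_decode_record being filled in. */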
4801
4802 /* AArch64 record/replay structures and enumerations. */
4803
4804 struct aarch64_mem_r
4805 {
4806 uint64_t len; /* Record length. */
4807 uint64_t addr; /* Memory address. */
4808 };
4809
4810 enum aarch64_record_result
4811 {
4812 AARCH64_RECORD_SUCCESS,
4813 AARCH64_RECORD_UNSUPPORTED,
4814 AARCH64_RECORD_UNKNOWN
4815 };
4816
4817 struct aarch64_insn_decode_record
4818 {
4819 struct gdbarch *gdbarch;
4820 struct regcache *regcache;
4821 CORE_ADDR this_addr; /* Address of insn to be recorded. */
4822 uint32_t aarch64_insn; /* Insn to be recorded. */
4823 uint32_t mem_rec_count; /* Count of memory records. */
4824 uint32_t reg_rec_count; /* Count of register records. */
4825 uint32_t *aarch64_regs; /* Registers to be recorded. */
4826 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
4827 };
4828
4829 /* Record handler for data processing - register instructions. */
4830
4831 static unsigned int
4832 aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r)
4833 {
4834 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
4835 uint32_t record_buf[4];
4836
4837 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4838 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4839 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
4840
4841 if (!bit (aarch64_insn_r->aarch64_insn, 28))
4842 {
4843 uint8_t setflags;
4844
4845 /* Logical (shifted register). */
4846 if (insn_bits24_27 == 0x0a)
4847 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
4848 /* Add/subtract. */
4849 else if (insn_bits24_27 == 0x0b)
4850 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4851 else
4852 return AARCH64_RECORD_UNKNOWN;
4853
4854 record_buf[0] = reg_rd;
4855 aarch64_insn_r->reg_rec_count = 1;
4856 if (setflags)
4857 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4858 }
4859 else
4860 {
4861 if (insn_bits24_27 == 0x0b)
4862 {
4863 /* Data-processing (3 source). */
4864 record_buf[0] = reg_rd;
4865 aarch64_insn_r->reg_rec_count = 1;
4866 }
4867 else if (insn_bits24_27 == 0x0a)
4868 {
4869 if (insn_bits21_23 == 0x00)
4870 {
4871 /* Add/subtract (with carry). */
4872 record_buf[0] = reg_rd;
4873 aarch64_insn_r->reg_rec_count = 1;
4874 if (bit (aarch64_insn_r->aarch64_insn, 29))
4875 {
4876 record_buf[1] = AARCH64_CPSR_REGNUM;
4877 aarch64_insn_r->reg_rec_count = 2;
4878 }
4879 }
4880 else if (insn_bits21_23 == 0x02)
4881 {
4882 /* Conditional compare (register) and conditional compare
4883 (immediate) instructions. */
4884 record_buf[0] = AARCH64_CPSR_REGNUM;
4885 aarch64_insn_r->reg_rec_count = 1;
4886 }
4887 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
4888 {
4889 /* Conditional select. */
4890 /* Data-processing (2 source). */
4891 /* Data-processing (1 source). */
4892 record_buf[0] = reg_rd;
4893 aarch64_insn_r->reg_rec_count = 1;
4894 }
4895 else
4896 return AARCH64_RECORD_UNKNOWN;
4897 }
4898 }
4899
4900 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4901 record_buf);
4902 return AARCH64_RECORD_SUCCESS;
4903 }
4904
4905 /* Record handler for data processing - immediate instructions. */
4906
4907 static unsigned int
4908 aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r)
4909 {
4910 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
4911 uint32_t record_buf[4];
4912
4913 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4914 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4915 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4916
4917 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
4918 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
4919 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
4920 {
4921 record_buf[0] = reg_rd;
4922 aarch64_insn_r->reg_rec_count = 1;
4923 }
4924 else if (insn_bits24_27 == 0x01)
4925 {
4926 /* Add/Subtract (immediate). */
4927 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
4928 record_buf[0] = reg_rd;
4929 aarch64_insn_r->reg_rec_count = 1;
4930 if (setflags)
4931 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4932 }
4933 else if (insn_bits24_27 == 0x02 && !insn_bit23)
4934 {
4935 /* Logical (immediate). */
4936 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
4937 record_buf[0] = reg_rd;
4938 aarch64_insn_r->reg_rec_count = 1;
4939 if (setflags)
4940 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
4941 }
4942 else
4943 return AARCH64_RECORD_UNKNOWN;
4944
4945 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4946 record_buf);
4947 return AARCH64_RECORD_SUCCESS;
4948 }
4949
4950 /* Record handler for branch, exception generation and system instructions. */
4951
4952 static unsigned int
4953 aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r)
4954 {
4956 aarch64_gdbarch_tdep *tdep
4957 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch);
4958 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
4959 uint32_t record_buf[4];
4960
4961 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4962 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4963 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4964
4965 if (insn_bits28_31 == 0x0d)
4966 {
4967 /* Exception generation instructions. */
4968 if (insn_bits24_27 == 0x04)
4969 {
4970 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
4971 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
4972 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
4973 {
4974 ULONGEST svc_number;
4975
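/* On AArch64 GNU/Linux, register x8 (regnum 8) holds the syscall
number. */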
4976 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
4977 &svc_number);
4978 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
4979 svc_number);
4980 }
4981 else
4982 return AARCH64_RECORD_UNSUPPORTED;
4983 }
4984 /* System instructions. */
4985 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
4986 {
4987 uint32_t reg_rt, reg_crn;
4988
4989 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4990 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4991
4992 /* Record rt in case of sysl and mrs instructions. */
4993 if (bit (aarch64_insn_r->aarch64_insn, 21))
4994 {
4995 record_buf[0] = reg_rt;
4996 aarch64_insn_r->reg_rec_count = 1;
4997 }
4998 /* Record cpsr for hint and msr(immediate) instructions. */
4999 else if (reg_crn == 0x02 || reg_crn == 0x04)
5000 {
5001 record_buf[0] = AARCH64_CPSR_REGNUM;
5002 aarch64_insn_r->reg_rec_count = 1;
5003 }
5004 }
5005 /* Unconditional branch (register). */
5006       else if ((insn_bits24_27 & 0x0e) == 0x06)
5007 {
5008 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5009 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
5010 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5011 }
5012 else
5013 return AARCH64_RECORD_UNKNOWN;
5014 }
5015 /* Unconditional branch (immediate). */
5016 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
5017 {
5018 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5019 if (bit (aarch64_insn_r->aarch64_insn, 31))
5020 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
5021 }
5022 else
5023 /* Compare & branch (immediate), Test & branch (immediate) and
5024 Conditional branch (immediate). */
5025 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
5026
5027 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5028 record_buf);
5029 return AARCH64_RECORD_SUCCESS;
5030 }
5031
5032 /* Record handler for advanced SIMD load and store instructions. */
5033
5034 static unsigned int
5035 aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5036 {
5037 CORE_ADDR address;
5038 uint64_t addr_offset = 0;
5039 uint32_t record_buf[24];
5040 uint64_t record_buf_mem[24];
5041 uint32_t reg_rn, reg_rt;
5042 uint32_t reg_index = 0, mem_index = 0;
5043 uint8_t opcode_bits, size_bits;
5044
5045 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5046 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5047 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5048 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5049 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
5050
5051 if (record_debug)
5052 debug_printf ("Process record: Advanced SIMD load/store\n");
5053
5054 /* Load/store single structure. */
5055 if (bit (aarch64_insn_r->aarch64_insn, 24))
5056 {
5057 uint8_t sindex, scale, selem, esize, replicate = 0;
5058 scale = opcode_bits >> 2;
5059       selem = ((opcode_bits & 0x02)
5060 	       | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
5061 switch (scale)
5062 {
5063 case 1:
5064 if (size_bits & 0x01)
5065 return AARCH64_RECORD_UNKNOWN;
5066 break;
5067 case 2:
5068 if ((size_bits >> 1) & 0x01)
5069 return AARCH64_RECORD_UNKNOWN;
5070 if (size_bits & 0x01)
5071 {
5072 if (!((opcode_bits >> 1) & 0x01))
5073 scale = 3;
5074 else
5075 return AARCH64_RECORD_UNKNOWN;
5076 }
5077 break;
5078 case 3:
5079 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
5080 {
5081 scale = size_bits;
5082 replicate = 1;
5083 break;
5084 }
5085 else
5086 return AARCH64_RECORD_UNKNOWN;
5087 default:
5088 break;
5089 }
5090 esize = 8 << scale;
5091 if (replicate)
5092 for (sindex = 0; sindex < selem; sindex++)
5093 {
5094 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5095 reg_rt = (reg_rt + 1) % 32;
5096 }
5097 else
5098 {
5099 for (sindex = 0; sindex < selem; sindex++)
5100 {
5101 if (bit (aarch64_insn_r->aarch64_insn, 22))
5102 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
5103 else
5104 {
5105 record_buf_mem[mem_index++] = esize / 8;
5106 record_buf_mem[mem_index++] = address + addr_offset;
5107 }
5108 addr_offset = addr_offset + (esize / 8);
5109 reg_rt = (reg_rt + 1) % 32;
5110 }
5111 }
5112 }
5113 /* Load/store multiple structure. */
5114 else
5115 {
5116 uint8_t selem, esize, rpt, elements;
5117 uint8_t eindex, rindex;
5118
5119 esize = 8 << size_bits;
5120 if (bit (aarch64_insn_r->aarch64_insn, 30))
5121 elements = 128 / esize;
5122 else
5123 elements = 64 / esize;
5124
5125 switch (opcode_bits)
5126 {
5127 	/* LD/ST4 (4 Registers).  */
5128 case 0:
5129 rpt = 1;
5130 selem = 4;
5131 break;
5132 	/* LD/ST1 (4 Registers).  */
5133 case 2:
5134 rpt = 4;
5135 selem = 1;
5136 break;
5137 	/* LD/ST3 (3 Registers).  */
5138 case 4:
5139 rpt = 1;
5140 selem = 3;
5141 break;
5142 	/* LD/ST1 (3 Registers).  */
5143 case 6:
5144 rpt = 3;
5145 selem = 1;
5146 break;
5147 	/* LD/ST1 (1 Register).  */
5148 case 7:
5149 rpt = 1;
5150 selem = 1;
5151 break;
5152 	/* LD/ST2 (2 Registers).  */
5153 case 8:
5154 rpt = 1;
5155 selem = 2;
5156 break;
5157 	/* LD/ST1 (2 Registers).  */
5158 case 10:
5159 rpt = 2;
5160 selem = 1;
5161 break;
5162 default:
5163 return AARCH64_RECORD_UNSUPPORTED;
5164 break;
5165 }
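/* E.g. (sketch): LD4 {v0.4s-v3.4s}, [x0] has opcode_bits == 0 and the
Q bit set, giving rpt = 1, selem = 4, esize = 32 and elements = 4, so
the loops below record v0-v3. */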
5166 for (rindex = 0; rindex < rpt; rindex++)
5167 for (eindex = 0; eindex < elements; eindex++)
5168 {
5169 uint8_t reg_tt, sindex;
5170 reg_tt = (reg_rt + rindex) % 32;
5171 for (sindex = 0; sindex < selem; sindex++)
5172 {
5173 if (bit (aarch64_insn_r->aarch64_insn, 22))
5174 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
5175 else
5176 {
5177 record_buf_mem[mem_index++] = esize / 8;
5178 record_buf_mem[mem_index++] = address + addr_offset;
5179 }
5180 addr_offset = addr_offset + (esize / 8);
5181 reg_tt = (reg_tt + 1) % 32;
5182 }
5183 }
5184 }
5185
5186 if (bit (aarch64_insn_r->aarch64_insn, 23))
5187 record_buf[reg_index++] = reg_rn;
5188
5189 aarch64_insn_r->reg_rec_count = reg_index;
5190 aarch64_insn_r->mem_rec_count = mem_index / 2;
5191 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5192 record_buf_mem);
5193 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5194 record_buf);
5195 return AARCH64_RECORD_SUCCESS;
5196 }
5197
5198 /* Record handler for load and store instructions. */
5199
5200 static unsigned int
5201 aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r)
5202 {
5203 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
5204 uint8_t insn_bit23, insn_bit21;
5205 uint8_t opc, size_bits, ld_flag, vector_flag;
5206 uint32_t reg_rn, reg_rt, reg_rt2;
5207 uint64_t datasize, offset;
5208 uint32_t record_buf[8];
5209 uint64_t record_buf_mem[8];
5210 CORE_ADDR address;
5211
5212 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5213 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5214 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
5215 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5216 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
5217 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
5218 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
5219 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5220 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
5221 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
5222 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
5223
5224 /* Load/store exclusive. */
5225 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
5226 {
5227 if (record_debug)
5228 debug_printf ("Process record: load/store exclusive\n");
5229
5230 if (ld_flag)
5231 {
5232 record_buf[0] = reg_rt;
5233 aarch64_insn_r->reg_rec_count = 1;
5234 if (insn_bit21)
5235 {
5236 record_buf[1] = reg_rt2;
5237 aarch64_insn_r->reg_rec_count = 2;
5238 }
5239 }
5240 else
5241 {
5242 if (insn_bit21)
5243 datasize = (8 << size_bits) * 2;
5244 else
5245 datasize = (8 << size_bits);
5246 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5247 &address);
5248 record_buf_mem[0] = datasize / 8;
5249 record_buf_mem[1] = address;
5250 aarch64_insn_r->mem_rec_count = 1;
5251 if (!insn_bit23)
5252 {
5253 /* Save register rs. */
5254 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
5255 aarch64_insn_r->reg_rec_count = 1;
5256 }
5257 }
5258 }
5259 /* Load register (literal) instructions decoding. */
5260 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
5261 {
5262 if (record_debug)
5263 debug_printf ("Process record: load register (literal)\n");
5264 if (vector_flag)
5265 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5266 else
5267 record_buf[0] = reg_rt;
5268 aarch64_insn_r->reg_rec_count = 1;
5269 }
5270   /* Decode all types of load/store pair instructions.  */
5271 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
5272 {
5273 if (record_debug)
5274 debug_printf ("Process record: load/store pair\n");
5275
5276 if (ld_flag)
5277 {
5278 if (vector_flag)
5279 {
5280 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5281 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
5282 }
5283 else
5284 {
5285 record_buf[0] = reg_rt;
5286 record_buf[1] = reg_rt2;
5287 }
5288 aarch64_insn_r->reg_rec_count = 2;
5289 }
5290 else
5291 {
5292 uint16_t imm7_off;
5293 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
5294 if (!vector_flag)
5295 size_bits = size_bits >> 1;
5296 datasize = 8 << (2 + size_bits);
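	  /* imm7 is signed: bit 6 is the sign bit, so take the two's
	     complement of a negative offset to recover its magnitude.  */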
5297 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
5298 offset = offset << (2 + size_bits);
5299 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5300 &address);
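	  /* Post-indexed pairs use the base register value unmodified; the
	     other addressing forms apply the signed, scaled offset first.  */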
5301 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
5302 {
5303 if (imm7_off & 0x40)
5304 address = address - offset;
5305 else
5306 address = address + offset;
5307 }
5308
5309 record_buf_mem[0] = datasize / 8;
5310 record_buf_mem[1] = address;
5311 record_buf_mem[2] = datasize / 8;
5312 record_buf_mem[3] = address + (datasize / 8);
5313 aarch64_insn_r->mem_rec_count = 2;
5314 }
5315 if (bit (aarch64_insn_r->aarch64_insn, 23))
5316 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
5317 }
5318 /* Load/store register (unsigned immediate) instructions. */
5319 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
5320 {
5321 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5322 if (!(opc >> 1))
5323 {
5324 if (opc & 0x01)
5325 ld_flag = 0x01;
5326 else
5327 ld_flag = 0x0;
5328 }
5329 else
5330 {
5331 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
5332 {
5333 /* PRFM (immediate) */
5334 return AARCH64_RECORD_SUCCESS;
5335 }
5336 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
5337 {
5338 /* LDRSW (immediate) */
5339 ld_flag = 0x1;
5340 }
5341 else
5342 {
5343 if (opc & 0x01)
5344 ld_flag = 0x01;
5345 else
5346 ld_flag = 0x0;
5347 }
5348 }
5349
5350 if (record_debug)
5351 {
5352 debug_printf ("Process record: load/store (unsigned immediate):"
5353 " size %x V %d opc %x\n", size_bits, vector_flag,
5354 opc);
5355 }
5356
5357 if (!ld_flag)
5358 {
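	  /* The unsigned 12-bit immediate is scaled by the access size to
	     form the byte offset.  */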
5359 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
5360 datasize = 8 << size_bits;
5361 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5362 &address);
5363 offset = offset << size_bits;
5364 address = address + offset;
5365
5366 record_buf_mem[0] = datasize >> 3;
5367 record_buf_mem[1] = address;
5368 aarch64_insn_r->mem_rec_count = 1;
5369 }
5370 else
5371 {
5372 if (vector_flag)
5373 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5374 else
5375 record_buf[0] = reg_rt;
5376 aarch64_insn_r->reg_rec_count = 1;
5377 }
5378 }
5379 /* Load/store register (register offset) instructions. */
5380 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
5381 && insn_bits10_11 == 0x02 && insn_bit21)
5382 {
5383 if (record_debug)
5384 debug_printf ("Process record: load/store (register offset)\n");
5385 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5386 if (!(opc >> 1))
5387 if (opc & 0x01)
5388 ld_flag = 0x01;
5389 else
5390 ld_flag = 0x0;
5391 else
5392 if (size_bits != 0x03)
5393 ld_flag = 0x01;
5394 else
5395 return AARCH64_RECORD_UNKNOWN;
5396
5397 if (!ld_flag)
5398 {
5399 ULONGEST reg_rm_val;
5400
5401 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
5402 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
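	  /* Bit 12 (the S bit) selects whether Rm is scaled by the access
	     size before being added to the base register.  */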
5403 if (bit (aarch64_insn_r->aarch64_insn, 12))
5404 offset = reg_rm_val << size_bits;
5405 else
5406 offset = reg_rm_val;
5407 datasize = 8 << size_bits;
5408 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5409 &address);
5410 address = address + offset;
5411 record_buf_mem[0] = datasize >> 3;
5412 record_buf_mem[1] = address;
5413 aarch64_insn_r->mem_rec_count = 1;
5414 }
5415 else
5416 {
5417 if (vector_flag)
5418 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5419 else
5420 record_buf[0] = reg_rt;
5421 aarch64_insn_r->reg_rec_count = 1;
5422 }
5423 }
5424 /* Load/store register (immediate and unprivileged) instructions. */
5425 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
5426 && !insn_bit21)
5427 {
5428 if (record_debug)
5429 {
5430 debug_printf ("Process record: load/store "
5431 "(immediate and unprivileged)\n");
5432 }
5433 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
5434 if (!(opc >> 1))
5435 if (opc & 0x01)
5436 ld_flag = 0x01;
5437 else
5438 ld_flag = 0x0;
5439 else
5440 if (size_bits != 0x03)
5441 ld_flag = 0x01;
5442 else
5443 return AARCH64_RECORD_UNKNOWN;
5444
5445 if (!ld_flag)
5446 {
5447 uint16_t imm9_off;
5448 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
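	  /* imm9 is signed: bit 8 is the sign bit, so take the two's
	     complement of a negative offset to recover its magnitude.  */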
5449 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
5450 datasize = 8 << size_bits;
5451 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
5452 &address);
5453 if (insn_bits10_11 != 0x01)
5454 {
5455 if (imm9_off & 0x0100)
5456 address = address - offset;
5457 else
5458 address = address + offset;
5459 }
5460 record_buf_mem[0] = datasize >> 3;
5461 record_buf_mem[1] = address;
5462 aarch64_insn_r->mem_rec_count = 1;
5463 }
5464 else
5465 {
5466 if (vector_flag)
5467 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
5468 else
5469 record_buf[0] = reg_rt;
5470 aarch64_insn_r->reg_rec_count = 1;
5471 }
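      /* The pre- and post-indexed forms (bits 10-11 equal to 0x01 or 0x03)
	 also write back the updated address to the base register.  */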
5472 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
5473 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
5474 }
5475 /* Advanced SIMD load/store instructions. */
5476 else
5477 return aarch64_record_asimd_load_store (aarch64_insn_r);
5478
5479 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
5480 record_buf_mem);
5481 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5482 record_buf);
5483 return AARCH64_RECORD_SUCCESS;
5484 }
5485
5486 /* Record handler for data processing SIMD and floating point instructions. */
5487
5488 static unsigned int
5489 aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r)
5490 {
5491 uint8_t insn_bit21, opcode, rmode, reg_rd;
5492 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
5493 uint8_t insn_bits11_14;
5494 uint32_t record_buf[2];
5495
5496 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
5497 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
5498 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
5499 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
5500 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
5501 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
5502 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
5503 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
5504 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
5505
5506 if (record_debug)
5507 debug_printf ("Process record: data processing SIMD/FP: ");
5508
5509 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
5510 {
5511 /* Floating point - fixed point conversion instructions. */
5512 if (!insn_bit21)
5513 {
5514 if (record_debug)
5515 debug_printf ("FP - fixed point conversion");
5516
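	  /* FCVTZ{S,U} (opcode<2:1> == 0, rmode == 3) write a general
	     register; the other fixed-point conversions write a SIMD
	     register.  */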
5517 if ((opcode >> 1) == 0x0 && rmode == 0x03)
5518 record_buf[0] = reg_rd;
5519 else
5520 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5521 }
5522 /* Floating point - conditional compare instructions. */
5523 else if (insn_bits10_11 == 0x01)
5524 {
5525 if (record_debug)
5526 debug_printf ("FP - conditional compare");
5527
5528 record_buf[0] = AARCH64_CPSR_REGNUM;
5529 }
5530 /* Floating point - data processing (2-source) and
5531 conditional select instructions. */
5532 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
5533 {
5534 if (record_debug)
5535 debug_printf ("FP - DP (2-source)");
5536
5537 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5538 }
5539 else if (insn_bits10_11 == 0x00)
5540 {
5541 /* Floating point - immediate instructions. */
5542 if ((insn_bits12_15 & 0x01) == 0x01
5543 || (insn_bits12_15 & 0x07) == 0x04)
5544 {
5545 if (record_debug)
5546 debug_printf ("FP - immediate");
5547 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5548 }
5549 /* Floating point - compare instructions. */
5550 else if ((insn_bits12_15 & 0x03) == 0x02)
5551 {
5552 if (record_debug)
5553		debug_printf ("FP - compare");
5554 record_buf[0] = AARCH64_CPSR_REGNUM;
5555 }
5556 /* Floating point - integer conversions instructions. */
5557 else if (insn_bits12_15 == 0x00)
5558 {
5559 /* Convert float to integer instruction. */
5560 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
5561 {
5562 if (record_debug)
5563 debug_printf ("float to int conversion");
5564
5565 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5566 }
5567 /* Convert integer to float instruction. */
5568 else if ((opcode >> 1) == 0x01 && !rmode)
5569 {
5570 if (record_debug)
5571 debug_printf ("int to float conversion");
5572
5573 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5574 }
5575 /* Move float to integer instruction. */
5576 else if ((opcode >> 1) == 0x03)
5577 {
5578 if (record_debug)
5579 debug_printf ("move float to int");
5580
5581 if (!(opcode & 0x01))
5582 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5583 else
5584 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5585 }
5586 else
5587 return AARCH64_RECORD_UNKNOWN;
5588 }
5589 else
5590 return AARCH64_RECORD_UNKNOWN;
5591 }
5592 else
5593 return AARCH64_RECORD_UNKNOWN;
5594 }
5595 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
5596 {
5597 if (record_debug)
5598 debug_printf ("SIMD copy");
5599
5600 /* Advanced SIMD copy instructions. */
5601 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
5602 && !bit (aarch64_insn_r->aarch64_insn, 15)
5603 && bit (aarch64_insn_r->aarch64_insn, 10))
5604 {
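	  /* SMOV/UMOV (bits 11-14 equal to 0x05 or 0x07) move a vector
	     element to a general register; the other copy forms write a
	     SIMD register.  */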
5605 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
5606 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
5607 else
5608 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5609 }
5610 else
5611 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5612 }
5613 /* All remaining floating point or advanced SIMD instructions. */
5614 else
5615 {
5616 if (record_debug)
5617	debug_printf ("all remaining SIMD/FP");
5618
5619 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
5620 }
5621
5622 if (record_debug)
5623 debug_printf ("\n");
5624
5625 /* Record the V/X register. */
5626 aarch64_insn_r->reg_rec_count++;
5627
5628 /* Some of these instructions may set bits in the FPSR, so record it
5629 too. */
5630 record_buf[1] = AARCH64_FPSR_REGNUM;
5631 aarch64_insn_r->reg_rec_count++;
5632
5633 gdb_assert (aarch64_insn_r->reg_rec_count == 2);
5634 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
5635 record_buf);
5636 return AARCH64_RECORD_SUCCESS;
5637 }
5638
5639 /* Decode the instruction type and invoke its record handler.  */
5640
5641 static unsigned int
5642 aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r)
5643 {
5644 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
5645
5646 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
5647 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
5648 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
5649 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
5650
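  /* Dispatch on bits 25-28, which select the top-level A64 encoding
     group.  */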
5651 /* Data processing - immediate instructions. */
5652 if (!ins_bit26 && !ins_bit27 && ins_bit28)
5653 return aarch64_record_data_proc_imm (aarch64_insn_r);
5654
5655 /* Branch, exception generation and system instructions. */
5656 if (ins_bit26 && !ins_bit27 && ins_bit28)
5657 return aarch64_record_branch_except_sys (aarch64_insn_r);
5658
5659 /* Load and store instructions. */
5660 if (!ins_bit25 && ins_bit27)
5661 return aarch64_record_load_store (aarch64_insn_r);
5662
5663 /* Data processing - register instructions. */
5664 if (ins_bit25 && !ins_bit26 && ins_bit27)
5665 return aarch64_record_data_proc_reg (aarch64_insn_r);
5666
5667 /* Data processing - SIMD and floating point instructions. */
5668 if (ins_bit25 && ins_bit26 && ins_bit27)
5669 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
5670
5671 return AARCH64_RECORD_UNSUPPORTED;
5672 }
5673
5674 /* Free the register and memory record lists allocated in RECORD.  */
5675
5676 static void
5677 deallocate_reg_mem (aarch64_insn_decode_record *record)
5678 {
5679 xfree (record->aarch64_regs);
5680 xfree (record->aarch64_mems);
5681 }
5682
5683 #if GDB_SELF_TEST
5684 namespace selftests {
5685
5686 static void
5687 aarch64_process_record_test (void)
5688 {
5689 struct gdbarch_info info;
5690 uint32_t ret;
5691
5692 info.bfd_arch_info = bfd_scan_arch ("aarch64");
5693
5694 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
5695 SELF_CHECK (gdbarch != NULL);
5696
5697 aarch64_insn_decode_record aarch64_record;
5698
5699 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
5700 aarch64_record.regcache = NULL;
5701 aarch64_record.this_addr = 0;
5702 aarch64_record.gdbarch = gdbarch;
5703
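  /* PRFM is a prefetch hint: recording it must succeed while reporting no
     register or memory changes.  */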
5704 /* 20 00 80 f9 prfm pldl1keep, [x1] */
5705 aarch64_record.aarch64_insn = 0xf9800020;
5706 ret = aarch64_record_decode_insn_handler (&aarch64_record);
5707 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
5708 SELF_CHECK (aarch64_record.reg_rec_count == 0);
5709 SELF_CHECK (aarch64_record.mem_rec_count == 0);
5710
5711 deallocate_reg_mem (&aarch64_record);
5712 }
5713
5714 } // namespace selftests
5715 #endif /* GDB_SELF_TEST */
5716
5717 /* Parse the current instruction, and record the values of the registers
5718    and memory that it will change in record_arch_list.  Return -1 if
5719    something is wrong.  */
5720
5721 int
5722 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
5723 CORE_ADDR insn_addr)
5724 {
5725 uint32_t rec_no = 0;
5726 uint8_t insn_size = 4;
5727 uint32_t ret = 0;
5728 gdb_byte buf[insn_size];
5729 aarch64_insn_decode_record aarch64_record;
5730
5731 memset (&buf[0], 0, insn_size);
5732 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record));
5733 target_read_memory (insn_addr, &buf[0], insn_size);
5734 aarch64_record.aarch64_insn
5735 = (uint32_t) extract_unsigned_integer (&buf[0],
5736 insn_size,
5737 gdbarch_byte_order (gdbarch));
5738 aarch64_record.regcache = regcache;
5739 aarch64_record.this_addr = insn_addr;
5740 aarch64_record.gdbarch = gdbarch;
5741
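  /* Decode the instruction and collect the registers and memory locations
     it will modify.  */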
5742 ret = aarch64_record_decode_insn_handler (&aarch64_record);
5743 if (ret == AARCH64_RECORD_UNSUPPORTED)
5744 {
5745 gdb_printf (gdb_stderr,
5746 _("Process record does not support instruction "
5747 "0x%0x at address %s.\n"),
5748 aarch64_record.aarch64_insn,
5749 paddress (gdbarch, insn_addr));
5750 ret = -1;
5751 }
5752
5753 if (0 == ret)
5754 {
5755 /* Record registers. */
5756 record_full_arch_list_add_reg (aarch64_record.regcache,
5757 AARCH64_PC_REGNUM);
5758 /* Always record register CPSR. */
5759 record_full_arch_list_add_reg (aarch64_record.regcache,
5760 AARCH64_CPSR_REGNUM);
5761 if (aarch64_record.aarch64_regs)
5762 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
5763 if (record_full_arch_list_add_reg (aarch64_record.regcache,
5764 aarch64_record.aarch64_regs[rec_no]))
5765 ret = -1;
5766
5767 /* Record memories. */
5768 if (aarch64_record.aarch64_mems)
5769 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
5770 if (record_full_arch_list_add_mem
5771 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
5772 aarch64_record.aarch64_mems[rec_no].len))
5773 ret = -1;
5774
5775 if (record_full_arch_list_add_end ())
5776 ret = -1;
5777 }
5778
5779 deallocate_reg_mem (&aarch64_record);
5780 return ret;
5781 }