5a0f0e9f52e31e8a0823e544122f4e65c2541570
[binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose debugging output produced via
   aarch64_verbose and the dump_* helpers below.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Indexed directly by the 5-bit encoded pattern value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Indexed directly by the 4-bit encoded prefetch operation.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
/* The enumeration strings associated with each value of a 6-bit RPRFM
   operation.  Trailing entries are implicitly initialized to null,
   indicating reserved encodings.  */
const char *const aarch64_rprfmop_array[64] = {
  "pldkeep",
  "pstkeep",
  0,
  0,
  "pldstrm",
  "pststrm"
};
112
/* Vector length multiples for a predicate-as-counter operand.  Used in things
   like AARCH64_OPND_SME_VLxN_10.  Indexed by the single encoded bit.  */
const char *const aarch64_sme_vlxn_array[2] = {
  "vlx2",
  "vlx4"
};
119
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
122
123 static inline bool
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
125 {
126 return (qualifier >= AARCH64_OPND_QLF_V_8B
127 && qualifier <= AARCH64_OPND_QLF_V_1Q);
128 }
129
130 static inline bool
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
132 {
133 return (qualifier >= AARCH64_OPND_QLF_S_B
134 && qualifier <= AARCH64_OPND_QLF_S_Q);
135 }
136
/* Classification of an instruction's operand-qualifier sequence.
   N.B. the enumerator values are used to index
   significant_operand_index below; keep the two in sync.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
145
/* For each data pattern, the index of the operand whose qualifier
   determines the encoding of the size:Q fields.  Indexed by
   enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
154
155 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
156 the data pattern.
157 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
158 corresponds to one of a sequence of operands. */
159
160 static enum data_pattern
161 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
162 {
163 if (vector_qualifier_p (qualifiers[0]))
164 {
165 /* e.g. v.4s, v.4s, v.4s
166 or v.4h, v.4h, v.h[3]. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2])
169 && (aarch64_get_qualifier_esize (qualifiers[0])
170 == aarch64_get_qualifier_esize (qualifiers[1]))
171 && (aarch64_get_qualifier_esize (qualifiers[0])
172 == aarch64_get_qualifier_esize (qualifiers[2])))
173 return DP_VECTOR_3SAME;
174 /* e.g. v.8h, v.8b, v.8b.
175 or v.4s, v.4h, v.h[2].
176 or v.8h, v.16b. */
177 if (vector_qualifier_p (qualifiers[1])
178 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
179 && (aarch64_get_qualifier_esize (qualifiers[0])
180 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
181 return DP_VECTOR_LONG;
182 /* e.g. v.8h, v.8h, v.8b. */
183 if (qualifiers[0] == qualifiers[1]
184 && vector_qualifier_p (qualifiers[2])
185 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
186 && (aarch64_get_qualifier_esize (qualifiers[0])
187 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
188 && (aarch64_get_qualifier_esize (qualifiers[0])
189 == aarch64_get_qualifier_esize (qualifiers[1])))
190 return DP_VECTOR_WIDE;
191 }
192 else if (fp_qualifier_p (qualifiers[0]))
193 {
194 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
195 if (vector_qualifier_p (qualifiers[1])
196 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
197 return DP_VECTOR_ACROSS_LANES;
198 }
199
200 return DP_UNKNOWN;
201 }
202
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either buffer the caculated the result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
209 benefit. */
210
211 int
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
213 {
214 return
215 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
216 }
217 \f
/* Instruction bit-fields: { lsb, width } of each field.
   Keep synced with 'enum aarch64_field_kind'.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 10, 2 },	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 10, 8 },	/* CSSC_imm8.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 0, 5 },	/* LSE128_Rt: Shared input+output operand register.  */
    { 16, 5 },	/* LSE128_Rt2: Shared input+output operand register 2.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 12, 2 },	/* SM3_imm2: Indexed element SM3 2 bits index immediate.  */
    { 1, 3 },	/* SME_Pdx2: predicate register, multiple of 2, [3:1].  */
    { 13, 3 },	/* SME_Pm: second source scalable predicate register P0-P7.  */
    { 0, 3 },	/* SME_PNd3: PN0-PN7, bits [2:0].  */
    { 5, 3 },	/* SME_PNn3: PN0-PN7, bits [7:5].  */
    { 16, 1 },	/* SME_Q: Q class bit, bit 16.  */
    { 16, 2 },	/* SME_Rm: index base register W12-W15 [17:16].  */
    { 13, 2 },	/* SME_Rv: vector select register W12-W15, bits [14:13].  */
    { 15, 1 },	/* SME_V: (horizontal / vertical tiles), bit 15.  */
    { 10, 1 },	/* SME_VL_10: VLx2 or VLx4, bit [10].  */
    { 13, 1 },	/* SME_VL_13: VLx2 or VLx4, bit [13].  */
    { 0, 2 },	/* SME_ZAda_2b: tile ZA0-ZA3.  */
    { 0, 3 },	/* SME_ZAda_3b: tile ZA0-ZA7.  */
    { 1, 4 },	/* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1].  */
    { 2, 3 },	/* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2].  */
    { 16, 4 },	/* SME_Zm: Z0-Z15, bits [19:16].  */
    { 17, 4 },	/* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17].  */
    { 18, 3 },	/* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18].  */
    { 6, 4 },	/* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6].  */
    { 7, 3 },	/* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7].  */
    { 4, 1 },	/* SME_ZtT: upper bit of Zt, bit [4].  */
    { 0, 3 },	/* SME_Zt3: lower 3 bits of Zt, bits [2:0].  */
    { 0, 2 },	/* SME_Zt2: lower 2 bits of Zt, bits [1:0].  */
    { 23, 1 },	/* SME_i1: immediate field, bit 23.  */
    { 12, 2 },	/* SME_size_12: bits [13:12].  */
    { 22, 2 },	/* SME_size_22: size<1>, size<0> class field, [23:22].  */
    { 23, 1 },	/* SME_sz_23: bit [23].  */
    { 22, 1 },	/* SME_tszh: immediate and qualifier field, bit 22.  */
    { 18, 3 },	/* SME_tszl: immediate and qualifier field, bits [20:18].  */
    { 0, 8 },	/* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 20, 1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 19, 2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 11, 1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10, 1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 17, 2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 30, 1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 22, 1 },	/* S_imm10: in LDRAA and LDRAB instructions.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 0, 1 },	/* imm1_0: general immediate in bits [0].  */
    { 2, 1 },	/* imm1_2: general immediate in bits [2].  */
    { 8, 1 },	/* imm1_8: general immediate in bits [8].  */
    { 10, 1 },	/* imm1_10: general immediate in bits [10].  */
    { 15, 1 },	/* imm1_15: general immediate in bits [15].  */
    { 16, 1 },	/* imm1_16: general immediate in bits [16].  */
    { 0, 2 },	/* imm2_0: general immediate in bits [1:0].  */
    { 1, 2 },	/* imm2_1: general immediate in bits [2:1].  */
    { 8, 2 },	/* imm2_8: general immediate in bits [9:8].  */
    { 10, 2 },	/* imm2_10: 2-bit immediate, bits [11:10] */
    { 12, 2 },	/* imm2_12: 2-bit immediate, bits [13:12] */
    { 15, 2 },	/* imm2_15: 2-bit immediate, bits [16:15] */
    { 16, 2 },	/* imm2_16: 2-bit immediate, bits [17:16] */
    { 19, 2 },	/* imm2_19: 2-bit immediate, bits [20:19] */
    { 0, 3 },	/* imm3_0: general immediate in bits [2:0].  */
    { 5, 3 },	/* imm3_5: general immediate in bits [7:5].  */
    { 10, 3 },	/* imm3_10: in add/sub extended reg instructions.  */
    { 12, 3 },	/* imm3_12: general immediate in bits [14:12].  */
    { 14, 3 },	/* imm3_14: general immediate in bits [16:14].  */
    { 15, 3 },	/* imm3_15: general immediate in bits [17:15].  */
    { 0, 4 },	/* imm4_0: in rmif instructions.  */
    { 5, 4 },	/* imm4_5: in SME instructions.  */
    { 10, 4 },	/* imm4_10: in adddg/subg instructions.  */
    { 11, 4 },	/* imm4_11: in advsimd ext and advsimd ins instructions.  */
    { 14, 4 },	/* imm4_14: general immediate in bits [17:14].  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 10, 6 },	/* imm6_10: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_15: in rmif instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 0, 16 },	/* imm16_0: in udf instruction.  */
    { 5, 16 },	/* imm16_5: in exception instructions.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 30, 1 },	/* lse_sz: in LSE extension atomic instructions.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
};
404
405 enum aarch64_operand_class
406 aarch64_get_operand_class (enum aarch64_opnd type)
407 {
408 return aarch64_operands[type].op_class;
409 }
410
411 const char *
412 aarch64_get_operand_name (enum aarch64_opnd type)
413 {
414 return aarch64_operands[type].name;
415 }
416
417 /* Get operand description string.
418 This is usually for the diagnosis purpose. */
419 const char *
420 aarch64_get_operand_desc (enum aarch64_opnd type)
421 {
422 return aarch64_operands[type].desc;
423 }
424
/* Table of all conditional affixes.  Each entry lists the accepted
   spellings (base name plus any aliases, e.g. the SVE condition
   aliases) together with the 4-bit condition code value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
445
446 const aarch64_cond *
447 get_cond_from_value (aarch64_insn value)
448 {
449 assert (value < 16);
450 return &aarch64_conds[(unsigned int) value];
451 }
452
453 const aarch64_cond *
454 get_inverted_cond (const aarch64_cond *cond)
455 {
456 return &aarch64_conds[cond->value ^ 0x1];
457 }
458
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The table is NULL-terminated.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
484
485 enum aarch64_modifier_kind
486 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
487 {
488 return desc - aarch64_operand_modifiers;
489 }
490
491 aarch64_insn
492 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
493 {
494 return aarch64_operand_modifiers[kind].value;
495 }
496
497 enum aarch64_modifier_kind
498 aarch64_get_operand_modifier_from_value (aarch64_insn value,
499 bool extend_p)
500 {
501 if (extend_p)
502 return AARCH64_MOD_UXTB + value;
503 else
504 return AARCH64_MOD_LSL - value;
505 }
506
507 bool
508 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
509 {
510 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
511 }
512
513 static inline bool
514 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
515 {
516 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
517 }
518
/* Barrier option names, indexed by the 4-bit CRm value; reserved
   encodings are printed as "#0xNN".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
538
/* DSB nXS barrier option names; the value is the full CRm immediate
   with CRm<3:2> holding the 2-bit encoding.  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2> #imm */
    { "oshnxs", 16 },    /*    00    16   */
    { "nshnxs", 20 },    /*    01    20   */
    { "ishnxs", 24 },    /*    10    24   */
    { "synxs",  28 },    /*    11    28   */
};
546
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "dsync",	HINT_OPD_DSYNC },	/* GCSB DSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
565
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   Reserved encodings have a NULL name and are printed numerically.
   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
606 \f
607 /* Utilities on value constraint. */
608
/* Return 1 if VALUE lies in [LOW, HIGH] inclusive, 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return low <= value && value <= high;
}
614
/* Return non-zero if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align ? 0 : 1;
}
621
/* Return 1 if the signed VALUE fits in a two's-complement field of
   WIDTH bits, 0 otherwise.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  /* No aarch64 instruction field is this wide.  */
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  /* Representable range is [-2^(width-1), 2^(width-1)).  */
  int64_t lim = (uint64_t) 1 << (width - 1);
  return -lim <= value && value < lim;
}
635
/* Return 1 if the non-negative VALUE fits in an unsigned field of
   WIDTH bits, 0 otherwise.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  /* No aarch64 instruction field is this wide.  */
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  /* Representable range is [0, 2^width).  */
  int64_t lim = (uint64_t) 1 << width;
  return 0 <= value && value < lim;
}
649
650 /* Return 1 if OPERAND is SP or WSP. */
651 int
652 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
653 {
654 return ((aarch64_get_operand_class (operand->type)
655 == AARCH64_OPND_CLASS_INT_REG)
656 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
657 && operand->reg.regno == 31);
658 }
659
660 /* Return 1 if OPERAND is XZR or WZP. */
661 int
662 aarch64_zero_register_p (const aarch64_opnd_info *operand)
663 {
664 return ((aarch64_get_operand_class (operand->type)
665 == AARCH64_OPND_CLASS_INT_REG)
666 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
667 && operand->reg.regno == 31);
668 }
669
670 /* Return true if the operand *OPERAND that has the operand code
671 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
672 qualified by the qualifier TARGET. */
673
674 static inline int
675 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
676 aarch64_opnd_qualifier_t target)
677 {
678 switch (operand->qualifier)
679 {
680 case AARCH64_OPND_QLF_W:
681 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
682 return 1;
683 break;
684 case AARCH64_OPND_QLF_X:
685 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
686 return 1;
687 break;
688 case AARCH64_OPND_QLF_WSP:
689 if (target == AARCH64_OPND_QLF_W
690 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
691 return 1;
692 break;
693 case AARCH64_OPND_QLF_SP:
694 if (target == AARCH64_OPND_QLF_X
695 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
696 return 1;
697 break;
698 default:
699 break;
700 }
701
702 return 0;
703 }
704
705 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
706 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
707
708 Return NIL if more than one expected qualifiers are found. */
709
710 aarch64_opnd_qualifier_t
711 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
712 int idx,
713 const aarch64_opnd_qualifier_t known_qlf,
714 int known_idx)
715 {
716 int i, saved_i;
717
718 /* Special case.
719
720 When the known qualifier is NIL, we have to assume that there is only
721 one qualifier sequence in the *QSEQ_LIST and return the corresponding
722 qualifier directly. One scenario is that for instruction
723 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
724 which has only one possible valid qualifier sequence
725 NIL, S_D
726 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
727 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
728
729 Because the qualifier NIL has dual roles in the qualifier sequence:
730 it can mean no qualifier for the operand, or the qualifer sequence is
731 not in use (when all qualifiers in the sequence are NILs), we have to
732 handle this special case here. */
733 if (known_qlf == AARCH64_OPND_NIL)
734 {
735 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
736 return qseq_list[0][idx];
737 }
738
739 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
740 {
741 if (qseq_list[i][known_idx] == known_qlf)
742 {
743 if (saved_i != -1)
744 /* More than one sequences are found to have KNOWN_QLF at
745 KNOWN_IDX. */
746 return AARCH64_OPND_NIL;
747 saved_i = i;
748 }
749 }
750
751 return qseq_list[saved_i][idx];
752 }
753
/* The broad category a qualifier belongs to; determines how the three
   data fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are lower
     bound, upper bound and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
774
775 /* Indexed by the operand qualifier enumerators. */
776 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
777 {
778 {0, 0, 0, "NIL", OQK_NIL},
779
780 /* Operand variant qualifiers.
781 First 3 fields:
782 element size, number of elements and common value for encoding. */
783
784 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
785 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
786 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
787 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
788
789 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
790 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
791 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
792 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
793 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
794 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
795 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
796
797 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
798 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
799 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
800 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
801 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
802 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
803 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
804 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
805 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
806 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
807 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
808
809 {0, 0, 0, "z", OQK_OPD_VARIANT},
810 {0, 0, 0, "m", OQK_OPD_VARIANT},
811
812 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
813 {16, 0, 0, "tag", OQK_OPD_VARIANT},
814
815 /* Qualifiers constraining the value range.
816 First 3 fields:
817 Lower bound, higher bound, unused. */
818
819 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
820 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
821 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
822 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
823 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
824 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
825 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
826
827 /* Qualifiers for miscellaneous purpose.
828 First 3 fields:
829 unused, unused and unused. */
830
831 {0, 0, 0, "lsl", 0},
832 {0, 0, 0, "msl", 0},
833
834 {0, 0, 0, "retrieving", 0},
835 };
836
837 static inline bool
838 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
839 {
840 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
841 }
842
843 static inline bool
844 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
845 {
846 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
847 }
848
849 const char*
850 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
851 {
852 return aarch64_opnd_qualifiers[qualifier].desc;
853 }
854
855 /* Given an operand qualifier, return the expected data element size
856 of a qualified operand. */
857 unsigned char
858 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
859 {
860 assert (operand_variant_qualifier_p (qualifier));
861 return aarch64_opnd_qualifiers[qualifier].data0;
862 }
863
864 unsigned char
865 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
866 {
867 assert (operand_variant_qualifier_p (qualifier));
868 return aarch64_opnd_qualifiers[qualifier].data1;
869 }
870
871 aarch64_insn
872 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
873 {
874 assert (operand_variant_qualifier_p (qualifier));
875 return aarch64_opnd_qualifiers[qualifier].data2;
876 }
877
878 static int
879 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
880 {
881 assert (qualifier_value_in_range_constraint_p (qualifier));
882 return aarch64_opnd_qualifiers[qualifier].data0;
883 }
884
885 static int
886 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
887 {
888 assert (qualifier_value_in_range_constraint_p (qualifier));
889 return aarch64_opnd_qualifiers[qualifier].data1;
890 }
891
892 #ifdef DEBUG_AARCH64
/* printf-style debug trace: the message is prefixed with "#### " and
   terminated with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;

  printf ("#### ");
  va_start (ap, str);
  vprintf (str, ap);
  va_end (ap);
  printf ("\n");
}
903
904 static inline void
905 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
906 {
907 int i;
908 printf ("#### \t");
909 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
910 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
911 printf ("\n");
912 }
913
914 static void
915 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
916 const aarch64_opnd_qualifier_t *qualifier)
917 {
918 int i;
919 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
920
921 aarch64_verbose ("dump_match_qualifiers:");
922 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
923 curr[i] = opnd[i].qualifier;
924 dump_qualifier_sequence (curr);
925 aarch64_verbose ("against");
926 dump_qualifier_sequence (qualifier);
927 }
928 #endif /* DEBUG_AARCH64 */
929
930 /* This function checks if the given instruction INSN is a destructive
931 instruction based on the usage of the registers. It does not recognize
932 unary destructive instructions. */
933 bool
934 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
935 {
936 int i = 0;
937 const enum aarch64_opnd *opnds = opcode->operands;
938
939 if (opnds[0] == AARCH64_OPND_NIL)
940 return false;
941
942 while (opnds[++i] != AARCH64_OPND_NIL)
943 if (opnds[i] == opnds[0])
944 return true;
945
946 return false;
947 }
948
949 /* TODO improve this, we can have an extra field at the runtime to
950 store the number of operands rather than calculating it every time. */
951
952 int
953 aarch64_num_of_operands (const aarch64_opcode *opcode)
954 {
955 int i = 0;
956 const enum aarch64_opnd *opnds = opcode->operands;
957 while (opnds[i++] != AARCH64_OPND_NIL)
958 ;
959 --i;
960 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
961 return i;
962 }
963
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* An opcode with no operands matches vacuously.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Normalize STOP_AT: -1 (or out-of-range) means match all operands.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Compare this candidate sequence against the operands' qualifiers,
	 counting mismatches in INVALID.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Track the best (smallest) mismatch count seen so far.  */
      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  Note that QUALIFIERS_LIST was advanced
	 by the loop above and now points at the matching sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers, then pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1091
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   if UPDATE_P, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Delegate the search to aarch64_find_best_match; STOP_AT of -1 means
     consider every operand of the opcode.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  Stop at the first NIL operand, which marks
     the end of the opcode's operand list.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1133
1134 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1135 register by MOVZ.
1136
1137 IS32 indicates whether value is a 32-bit immediate or not.
1138 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1139 amount will be returned in *SHIFT_AMOUNT. */
1140
1141 bool
1142 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1143 {
1144 int amount;
1145
1146 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1147
1148 if (is32)
1149 {
1150 /* Allow all zeros or all ones in top 32-bits, so that
1151 32-bit constant expressions like ~0x80000000 are
1152 permitted. */
1153 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1154 /* Immediate out of range. */
1155 return false;
1156 value &= 0xffffffff;
1157 }
1158
1159 /* first, try movz then movn */
1160 amount = -1;
1161 if ((value & ((uint64_t) 0xffff << 0)) == value)
1162 amount = 0;
1163 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1164 amount = 16;
1165 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1166 amount = 32;
1167 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1168 amount = 48;
1169
1170 if (amount == -1)
1171 {
1172 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1173 return false;
1174 }
1175
1176 if (shift_amount != NULL)
1177 *shift_amount = amount;
1178
1179 DEBUG_TRACE ("exit true with amount %d", amount);
1180
1181 return true;
1182 }
1183
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr         SIMD size  R             S
     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
     0      11110s   00000r       2       UInt(r)       UInt(s)
   where all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */

/* Number of distinct (value, encoding) pairs enumerated above.  */
#define TOTAL_IMM_NB  5334

/* One table entry: the replicated 64-bit immediate and its standard
   N:immr:imms encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical-immediate values, sorted by IMM for bsearch;
   filled lazily by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1212
1213 static int
1214 simd_imm_encoding_cmp(const void *i1, const void *i2)
1215 {
1216 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1217 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1218
1219 if (imm1->imm < imm2->imm)
1220 return -1;
1221 if (imm1->imm > imm2->imm)
1222 return +1;
1223 return 0;
1224 }
1225
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imms in bits 5:0.  */

  encoding |= r << 6;		/* immr in bits 11:6.  */
  encoding |= is64 << 12;	/* N flag in bit 12.  */
  return encoding;
}
1239
/* Populate simd_immediates[] with every valid logical-immediate value and
   its encoding, then sort the table by value so that
   aarch64_logical_immediate_p can use bsearch.  Called once, lazily.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits (log_e = 1..6).  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = E - 1 (all-ones run) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The enumeration above must yield exactly the documented count.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1305
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Build the lookup table on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     UPPER is a mask of the bits above the ESIZE-byte value; the shift is
     split in two (esize * 4 twice) so that esize == 8 shifts by 32+32
     rather than by 64, which would be undefined behaviour.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted table built by build_immediate_table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1357
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  for (bit = 0; bit < 8; bit++)
    {
      /* Byte BIT of IMM maps to result bit BIT; it must be all-ones or
	 all-zeros for the immediate to be shrinkable.  */
      uint32_t b = (uint32_t) (imm >> (8 * bit)) & 0xff;

      if (b == 0xff)
	result |= 1 << bit;
      else if (b != 0x00)
	return -1;
    }
  return result;
}
1379
1380 /* Utility inline functions for operand_general_constraint_met_p. */
1381
1382 static inline void
1383 set_error (aarch64_operand_error *mismatch_detail,
1384 enum aarch64_operand_error_kind kind, int idx,
1385 const char* error)
1386 {
1387 if (mismatch_detail == NULL)
1388 return;
1389 mismatch_detail->kind = kind;
1390 mismatch_detail->index = idx;
1391 mismatch_detail->error = error;
1392 }
1393
1394 static inline void
1395 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1396 const char* error)
1397 {
1398 if (mismatch_detail == NULL)
1399 return;
1400 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1401 }
1402
1403 static inline void
1404 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1405 const char *prefix, int lower_bound, int upper_bound)
1406 {
1407 if (mismatch_detail == NULL)
1408 return;
1409 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1410 mismatch_detail->data[0].s = prefix;
1411 mismatch_detail->data[1].i = lower_bound;
1412 mismatch_detail->data[2].i = upper_bound;
1413 }
1414
1415 static inline void
1416 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1417 int idx, int lower_bound, int upper_bound,
1418 const char* error)
1419 {
1420 if (mismatch_detail == NULL)
1421 return;
1422 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1423 mismatch_detail->data[0].i = lower_bound;
1424 mismatch_detail->data[1].i = upper_bound;
1425 }
1426
1427 static inline void
1428 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1429 int idx, int lower_bound, int upper_bound)
1430 {
1431 if (mismatch_detail == NULL)
1432 return;
1433 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1434 _("immediate value"));
1435 }
1436
1437 static inline void
1438 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1439 int idx, int lower_bound, int upper_bound)
1440 {
1441 if (mismatch_detail == NULL)
1442 return;
1443 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1444 _("immediate offset"));
1445 }
1446
1447 static inline void
1448 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1449 int idx, int lower_bound, int upper_bound)
1450 {
1451 if (mismatch_detail == NULL)
1452 return;
1453 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1454 _("register number"));
1455 }
1456
1457 static inline void
1458 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1459 int idx, int lower_bound, int upper_bound)
1460 {
1461 if (mismatch_detail == NULL)
1462 return;
1463 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1464 _("register element index"));
1465 }
1466
1467 static inline void
1468 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1469 int idx, int lower_bound, int upper_bound)
1470 {
1471 if (mismatch_detail == NULL)
1472 return;
1473 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1474 _("shift amount"));
1475 }
1476
1477 /* Report that the MUL modifier in operand IDX should be in the range
1478 [LOWER_BOUND, UPPER_BOUND]. */
1479 static inline void
1480 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1481 int idx, int lower_bound, int upper_bound)
1482 {
1483 if (mismatch_detail == NULL)
1484 return;
1485 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1486 _("multiplier"));
1487 }
1488
1489 static inline void
1490 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1491 int alignment)
1492 {
1493 if (mismatch_detail == NULL)
1494 return;
1495 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1496 mismatch_detail->data[0].i = alignment;
1497 }
1498
1499 static inline void
1500 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1501 int expected_num)
1502 {
1503 if (mismatch_detail == NULL)
1504 return;
1505 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1506 mismatch_detail->data[0].i = 1 << expected_num;
1507 }
1508
1509 static inline void
1510 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1511 int expected_num)
1512 {
1513 if (mismatch_detail == NULL)
1514 return;
1515 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1516 mismatch_detail->data[0].i = 1 << expected_num;
1517 }
1518
1519 static inline void
1520 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1521 int idx, int expected)
1522 {
1523 if (mismatch_detail == NULL)
1524 return;
1525 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1526 mismatch_detail->data[0].i = expected;
1527 }
1528
1529 static inline void
1530 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1531 const char* error)
1532 {
1533 if (mismatch_detail == NULL)
1534 return;
1535 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1536 }
1537
1538 /* Check that indexed register operand OPND has a register in the range
1539 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1540 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1541
1542 static bool
1543 check_reglane (const aarch64_opnd_info *opnd,
1544 aarch64_operand_error *mismatch_detail, int idx,
1545 const char *prefix, int min_regno, int max_regno,
1546 int min_index, int max_index)
1547 {
1548 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1549 {
1550 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1551 max_regno);
1552 return false;
1553 }
1554 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1555 {
1556 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1557 max_index);
1558 return false;
1559 }
1560 return true;
1561 }
1562
1563 /* Check that register list operand OPND has NUM_REGS registers and a
1564 register stride of STRIDE. */
1565
1566 static bool
1567 check_reglist (const aarch64_opnd_info *opnd,
1568 aarch64_operand_error *mismatch_detail, int idx,
1569 int num_regs, int stride)
1570 {
1571 if (opnd->reglist.num_regs != num_regs)
1572 {
1573 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1574 return false;
1575 }
1576 if (opnd->reglist.stride != stride)
1577 {
1578 set_reg_list_stride_error (mismatch_detail, idx, stride);
1579 return false;
1580 }
1581 return true;
1582 }
1583
/* Check that indexed ZA operand OPND has:

   - a selection register in the range [MIN_WREG, MIN_WREG + 3]

   - RANGE_SIZE consecutive immediate offsets.

   - an initial immediate offset that is a multiple of RANGE_SIZE
     in the range [0, MAX_VALUE * RANGE_SIZE]

   - a vector group size of GROUP_SIZE.  */

static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size)
{
  /* Only w12-w15 and w8-w11 are ever used as selection registers, hence
     the two hard-coded messages below.  */
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  /* The starting offset must fall in [0, MAX_VALUE * RANGE_SIZE].  */
  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* A multi-offset range must start on a RANGE_SIZE boundary.  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* countm1 is the number of offsets minus one; it must agree with
     RANGE_SIZE exactly.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code.  */
  if (opnd->indexed_za.group_size != 0
      && opnd->indexed_za.group_size != group_size)
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
1660
1661 /* General constraint checking based on operand code.
1662
1663 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1664 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1665
1666 This function has to be called after the qualifiers for all operands
1667 have been resolved.
1668
1669 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1670 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1671 of error message during the disassembling where error message is not
1672 wanted. We avoid the dynamic construction of strings of error messages
1673 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1674 use a combination of error code, static string and some integer data to
1675 represent an error. */
1676
1677 static int
1678 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1679 enum aarch64_opnd type,
1680 const aarch64_opcode *opcode,
1681 aarch64_operand_error *mismatch_detail)
1682 {
1683 unsigned num, modifiers, shift;
1684 unsigned char size;
1685 int64_t imm, min_value, max_value;
1686 uint64_t uvalue, mask;
1687 const aarch64_opnd_info *opnd = opnds + idx;
1688 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1689 int i;
1690
1691 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1692
1693 switch (aarch64_operands[type].op_class)
1694 {
1695 case AARCH64_OPND_CLASS_INT_REG:
1696 /* Check pair reg constraints for cas* instructions. */
1697 if (type == AARCH64_OPND_PAIRREG)
1698 {
1699 assert (idx == 1 || idx == 3);
1700 if (opnds[idx - 1].reg.regno % 2 != 0)
1701 {
1702 set_syntax_error (mismatch_detail, idx - 1,
1703 _("reg pair must start from even reg"));
1704 return 0;
1705 }
1706 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1707 {
1708 set_syntax_error (mismatch_detail, idx,
1709 _("reg pair must be contiguous"));
1710 return 0;
1711 }
1712 break;
1713 }
1714
1715 /* <Xt> may be optional in some IC and TLBI instructions. */
1716 if (type == AARCH64_OPND_Rt_SYS)
1717 {
1718 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1719 == AARCH64_OPND_CLASS_SYSTEM));
1720 if (opnds[1].present
1721 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1722 {
1723 set_other_error (mismatch_detail, idx, _("extraneous register"));
1724 return 0;
1725 }
1726 if (!opnds[1].present
1727 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1728 {
1729 set_other_error (mismatch_detail, idx, _("missing register"));
1730 return 0;
1731 }
1732 }
1733 switch (qualifier)
1734 {
1735 case AARCH64_OPND_QLF_WSP:
1736 case AARCH64_OPND_QLF_SP:
1737 if (!aarch64_stack_pointer_p (opnd))
1738 {
1739 set_other_error (mismatch_detail, idx,
1740 _("stack pointer register expected"));
1741 return 0;
1742 }
1743 break;
1744 default:
1745 break;
1746 }
1747 break;
1748
1749 case AARCH64_OPND_CLASS_SVE_REG:
1750 switch (type)
1751 {
1752 case AARCH64_OPND_SVE_Zm3_INDEX:
1753 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1754 case AARCH64_OPND_SVE_Zm3_19_INDEX:
1755 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1756 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1757 case AARCH64_OPND_SVE_Zm4_INDEX:
1758 size = get_operand_fields_width (get_operand_from_code (type));
1759 shift = get_operand_specific_data (&aarch64_operands[type]);
1760 if (!check_reglane (opnd, mismatch_detail, idx,
1761 "z", 0, (1 << shift) - 1,
1762 0, (1u << (size - shift)) - 1))
1763 return 0;
1764 break;
1765
1766 case AARCH64_OPND_SVE_Zn_INDEX:
1767 size = aarch64_get_qualifier_esize (opnd->qualifier);
1768 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1769 0, 64 / size - 1))
1770 return 0;
1771 break;
1772
1773 case AARCH64_OPND_SME_PNn3_INDEX1:
1774 case AARCH64_OPND_SME_PNn3_INDEX2:
1775 size = get_operand_field_width (get_operand_from_code (type), 1);
1776 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1777 0, (1 << size) - 1))
1778 return 0;
1779 break;
1780
1781 case AARCH64_OPND_SME_Zn_INDEX1_16:
1782 case AARCH64_OPND_SME_Zn_INDEX2_15:
1783 case AARCH64_OPND_SME_Zn_INDEX2_16:
1784 case AARCH64_OPND_SME_Zn_INDEX3_14:
1785 case AARCH64_OPND_SME_Zn_INDEX3_15:
1786 case AARCH64_OPND_SME_Zn_INDEX4_14:
1787 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1788 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1789 0, (1 << size) - 1))
1790 return 0;
1791 break;
1792
1793 case AARCH64_OPND_SME_Zm_INDEX1:
1794 case AARCH64_OPND_SME_Zm_INDEX2:
1795 case AARCH64_OPND_SME_Zm_INDEX3_1:
1796 case AARCH64_OPND_SME_Zm_INDEX3_2:
1797 case AARCH64_OPND_SME_Zm_INDEX3_10:
1798 case AARCH64_OPND_SME_Zm_INDEX4_1:
1799 case AARCH64_OPND_SME_Zm_INDEX4_10:
1800 size = get_operand_fields_width (get_operand_from_code (type)) - 4;
1801 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
1802 0, (1 << size) - 1))
1803 return 0;
1804 break;
1805
1806 case AARCH64_OPND_SME_Zm:
1807 if (opnd->reg.regno > 15)
1808 {
1809 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1810 return 0;
1811 }
1812 break;
1813
1814 case AARCH64_OPND_SME_PnT_Wm_imm:
1815 size = aarch64_get_qualifier_esize (opnd->qualifier);
1816 max_value = 16 / size - 1;
1817 if (!check_za_access (opnd, mismatch_detail, idx,
1818 12, max_value, 1, 0))
1819 return 0;
1820 break;
1821
1822 default:
1823 break;
1824 }
1825 break;
1826
1827 case AARCH64_OPND_CLASS_SVE_REGLIST:
1828 switch (type)
1829 {
1830 case AARCH64_OPND_SME_Pdx2:
1831 case AARCH64_OPND_SME_Zdnx2:
1832 case AARCH64_OPND_SME_Zdnx4:
1833 case AARCH64_OPND_SME_Zmx2:
1834 case AARCH64_OPND_SME_Zmx4:
1835 case AARCH64_OPND_SME_Znx2:
1836 case AARCH64_OPND_SME_Znx4:
1837 num = get_operand_specific_data (&aarch64_operands[type]);
1838 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1839 return 0;
1840 if ((opnd->reglist.first_regno % num) != 0)
1841 {
1842 set_other_error (mismatch_detail, idx,
1843 _("start register out of range"));
1844 return 0;
1845 }
1846 break;
1847
1848 case AARCH64_OPND_SME_Ztx2_STRIDED:
1849 case AARCH64_OPND_SME_Ztx4_STRIDED:
1850 /* 2-register lists have a stride of 8 and 4-register lists
1851 have a stride of 4. */
1852 num = get_operand_specific_data (&aarch64_operands[type]);
1853 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1854 return 0;
1855 num = 16 | (opnd->reglist.stride - 1);
1856 if ((opnd->reglist.first_regno & ~num) != 0)
1857 {
1858 set_other_error (mismatch_detail, idx,
1859 _("start register out of range"));
1860 return 0;
1861 }
1862 break;
1863
1864 case AARCH64_OPND_SME_PdxN:
1865 case AARCH64_OPND_SVE_ZnxN:
1866 case AARCH64_OPND_SVE_ZtxN:
1867 num = get_opcode_dependent_value (opcode);
1868 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1869 return 0;
1870 break;
1871
1872 default:
1873 abort ();
1874 }
1875 break;
1876
1877 case AARCH64_OPND_CLASS_ZA_ACCESS:
1878 switch (type)
1879 {
1880 case AARCH64_OPND_SME_ZA_HV_idx_src:
1881 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1882 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1883 size = aarch64_get_qualifier_esize (opnd->qualifier);
1884 max_value = 16 / size - 1;
1885 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1886 get_opcode_dependent_value (opcode)))
1887 return 0;
1888 break;
1889
1890 case AARCH64_OPND_SME_ZA_array_off4:
1891 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1892 get_opcode_dependent_value (opcode)))
1893 return 0;
1894 break;
1895
1896 case AARCH64_OPND_SME_ZA_array_off3_0:
1897 case AARCH64_OPND_SME_ZA_array_off3_5:
1898 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1899 get_opcode_dependent_value (opcode)))
1900 return 0;
1901 break;
1902
1903 case AARCH64_OPND_SME_ZA_array_off1x4:
1904 if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
1905 get_opcode_dependent_value (opcode)))
1906 return 0;
1907 break;
1908
1909 case AARCH64_OPND_SME_ZA_array_off2x2:
1910 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
1911 get_opcode_dependent_value (opcode)))
1912 return 0;
1913 break;
1914
1915 case AARCH64_OPND_SME_ZA_array_off2x4:
1916 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
1917 get_opcode_dependent_value (opcode)))
1918 return 0;
1919 break;
1920
1921 case AARCH64_OPND_SME_ZA_array_off3x2:
1922 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
1923 get_opcode_dependent_value (opcode)))
1924 return 0;
1925 break;
1926
1927 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
1928 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
1929 size = aarch64_get_qualifier_esize (opnd->qualifier);
1930 num = get_opcode_dependent_value (opcode);
1931 max_value = 16 / num / size;
1932 if (max_value > 0)
1933 max_value -= 1;
1934 if (!check_za_access (opnd, mismatch_detail, idx,
1935 12, max_value, num, 0))
1936 return 0;
1937 break;
1938
1939 default:
1940 abort ();
1941 }
1942 break;
1943
1944 case AARCH64_OPND_CLASS_PRED_REG:
1945 switch (type)
1946 {
1947 case AARCH64_OPND_SME_PNd3:
1948 case AARCH64_OPND_SME_PNg3:
1949 if (opnd->reg.regno < 8)
1950 {
1951 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
1952 return 0;
1953 }
1954 break;
1955
1956 default:
1957 if (opnd->reg.regno >= 8
1958 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1959 {
1960 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
1961 return 0;
1962 }
1963 break;
1964 }
1965 break;
1966
1967 case AARCH64_OPND_CLASS_COND:
1968 if (type == AARCH64_OPND_COND1
1969 && (opnds[idx].cond->value & 0xe) == 0xe)
1970 {
1971 /* Not allow AL or NV. */
1972 set_syntax_error (mismatch_detail, idx, NULL);
1973 }
1974 break;
1975
1976 case AARCH64_OPND_CLASS_ADDRESS:
1977 /* Check writeback. */
1978 switch (opcode->iclass)
1979 {
1980 case ldst_pos:
1981 case ldst_unscaled:
1982 case ldstnapair_offs:
1983 case ldstpair_off:
1984 case ldst_unpriv:
1985 if (opnd->addr.writeback == 1)
1986 {
1987 set_syntax_error (mismatch_detail, idx,
1988 _("unexpected address writeback"));
1989 return 0;
1990 }
1991 break;
1992 case ldst_imm10:
1993 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1994 {
1995 set_syntax_error (mismatch_detail, idx,
1996 _("unexpected address writeback"));
1997 return 0;
1998 }
1999 break;
2000 case ldst_imm9:
2001 case ldstpair_indexed:
2002 case asisdlsep:
2003 case asisdlsop:
2004 if (opnd->addr.writeback == 0)
2005 {
2006 set_syntax_error (mismatch_detail, idx,
2007 _("address writeback expected"));
2008 return 0;
2009 }
2010 break;
2011 default:
2012 assert (opnd->addr.writeback == 0);
2013 break;
2014 }
2015 switch (type)
2016 {
2017 case AARCH64_OPND_ADDR_SIMM7:
2018 /* Scaled signed 7 bits immediate offset. */
2019 /* Get the size of the data element that is accessed, which may be
2020 different from that of the source register size,
2021 e.g. in strb/ldrb. */
2022 size = aarch64_get_qualifier_esize (opnd->qualifier);
2023 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
2024 {
2025 set_offset_out_of_range_error (mismatch_detail, idx,
2026 -64 * size, 63 * size);
2027 return 0;
2028 }
2029 if (!value_aligned_p (opnd->addr.offset.imm, size))
2030 {
2031 set_unaligned_error (mismatch_detail, idx, size);
2032 return 0;
2033 }
2034 break;
2035 case AARCH64_OPND_ADDR_OFFSET:
2036 case AARCH64_OPND_ADDR_SIMM9:
2037 /* Unscaled signed 9 bits immediate offset. */
2038 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2039 {
2040 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
2041 return 0;
2042 }
2043 break;
2044
2045 case AARCH64_OPND_ADDR_SIMM9_2:
2046 /* Unscaled signed 9 bits immediate offset, which has to be negative
2047 or unaligned. */
2048 size = aarch64_get_qualifier_esize (qualifier);
2049 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
2050 && !value_aligned_p (opnd->addr.offset.imm, size))
2051 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
2052 return 1;
2053 set_other_error (mismatch_detail, idx,
2054 _("negative or unaligned offset expected"));
2055 return 0;
2056
2057 case AARCH64_OPND_ADDR_SIMM10:
2058 /* Scaled signed 10 bits immediate offset. */
2059 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
2060 {
2061 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2062 return 0;
2063 }
2064 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2065 {
2066 set_unaligned_error (mismatch_detail, idx, 8);
2067 return 0;
2068 }
2069 break;
2070
2071 case AARCH64_OPND_ADDR_SIMM11:
2072 /* Signed 11 bits immediate offset (multiple of 16). */
2073 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2074 {
2075 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2076 return 0;
2077 }
2078
2079 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2080 {
2081 set_unaligned_error (mismatch_detail, idx, 16);
2082 return 0;
2083 }
2084 break;
2085
2086 case AARCH64_OPND_ADDR_SIMM13:
2087 /* Signed 13 bits immediate offset (multiple of 16). */
2088 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2089 {
2090 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2091 return 0;
2092 }
2093
2094 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2095 {
2096 set_unaligned_error (mismatch_detail, idx, 16);
2097 return 0;
2098 }
2099 break;
2100
2101 case AARCH64_OPND_SIMD_ADDR_POST:
2102 /* AdvSIMD load/store multiple structures, post-index. */
2103 assert (idx == 1);
2104 if (opnd->addr.offset.is_reg)
2105 {
2106 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2107 return 1;
2108 else
2109 {
2110 set_other_error (mismatch_detail, idx,
2111 _("invalid register offset"));
2112 return 0;
2113 }
2114 }
2115 else
2116 {
2117 const aarch64_opnd_info *prev = &opnds[idx-1];
2118 unsigned num_bytes; /* total number of bytes transferred. */
2119 /* The opcode dependent area stores the number of elements in
2120 each structure to be loaded/stored. */
2121 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2122 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
2123 /* Special handling of loading single structure to all lane. */
2124 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2125 * aarch64_get_qualifier_esize (prev->qualifier);
2126 else
2127 num_bytes = prev->reglist.num_regs
2128 * aarch64_get_qualifier_esize (prev->qualifier)
2129 * aarch64_get_qualifier_nelem (prev->qualifier);
2130 if ((int) num_bytes != opnd->addr.offset.imm)
2131 {
2132 set_other_error (mismatch_detail, idx,
2133 _("invalid post-increment amount"));
2134 return 0;
2135 }
2136 }
2137 break;
2138
2139 case AARCH64_OPND_ADDR_REGOFF:
2140 /* Get the size of the data element that is accessed, which may be
2141 different from that of the source register size,
2142 e.g. in strb/ldrb. */
2143 size = aarch64_get_qualifier_esize (opnd->qualifier);
2144 /* It is either no shift or shift by the binary logarithm of SIZE. */
2145 if (opnd->shifter.amount != 0
2146 && opnd->shifter.amount != (int)get_logsz (size))
2147 {
2148 set_other_error (mismatch_detail, idx,
2149 _("invalid shift amount"));
2150 return 0;
2151 }
2152 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2153 operators. */
2154 switch (opnd->shifter.kind)
2155 {
2156 case AARCH64_MOD_UXTW:
2157 case AARCH64_MOD_LSL:
2158 case AARCH64_MOD_SXTW:
2159 case AARCH64_MOD_SXTX: break;
2160 default:
2161 set_other_error (mismatch_detail, idx,
2162 _("invalid extend/shift operator"));
2163 return 0;
2164 }
2165 break;
2166
2167 case AARCH64_OPND_ADDR_UIMM12:
2168 imm = opnd->addr.offset.imm;
2169 /* Get the size of the data element that is accessed, which may be
2170 different from that of the source register size,
2171 e.g. in strb/ldrb. */
2172 size = aarch64_get_qualifier_esize (qualifier);
2173 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2174 {
2175 set_offset_out_of_range_error (mismatch_detail, idx,
2176 0, 4095 * size);
2177 return 0;
2178 }
2179 if (!value_aligned_p (opnd->addr.offset.imm, size))
2180 {
2181 set_unaligned_error (mismatch_detail, idx, size);
2182 return 0;
2183 }
2184 break;
2185
2186 case AARCH64_OPND_ADDR_PCREL14:
2187 case AARCH64_OPND_ADDR_PCREL19:
2188 case AARCH64_OPND_ADDR_PCREL21:
2189 case AARCH64_OPND_ADDR_PCREL26:
2190 imm = opnd->imm.value;
2191 if (operand_need_shift_by_two (get_operand_from_code (type)))
2192 {
 2193 	      /* The offset value in a PC-relative branch instruction is always
2194 4-byte aligned and is encoded without the lowest 2 bits. */
2195 if (!value_aligned_p (imm, 4))
2196 {
2197 set_unaligned_error (mismatch_detail, idx, 4);
2198 return 0;
2199 }
2200 /* Right shift by 2 so that we can carry out the following check
2201 canonically. */
2202 imm >>= 2;
2203 }
2204 size = get_operand_fields_width (get_operand_from_code (type));
2205 if (!value_fit_signed_field_p (imm, size))
2206 {
2207 set_other_error (mismatch_detail, idx,
2208 _("immediate out of range"));
2209 return 0;
2210 }
2211 break;
2212
2213 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2214 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2215 {
2216 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2217 return 0;
2218 }
2219 break;
2220
2221 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2222 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2223 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2224 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2225 min_value = -8;
2226 max_value = 7;
2227 sve_imm_offset_vl:
2228 assert (!opnd->addr.offset.is_reg);
2229 assert (opnd->addr.preind);
2230 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2231 min_value *= num;
2232 max_value *= num;
2233 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2234 || (opnd->shifter.operator_present
2235 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2236 {
2237 set_other_error (mismatch_detail, idx,
2238 _("invalid addressing mode"));
2239 return 0;
2240 }
2241 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2242 {
2243 set_offset_out_of_range_error (mismatch_detail, idx,
2244 min_value, max_value);
2245 return 0;
2246 }
2247 if (!value_aligned_p (opnd->addr.offset.imm, num))
2248 {
2249 set_unaligned_error (mismatch_detail, idx, num);
2250 return 0;
2251 }
2252 break;
2253
2254 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2255 min_value = -32;
2256 max_value = 31;
2257 goto sve_imm_offset_vl;
2258
2259 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2260 min_value = -256;
2261 max_value = 255;
2262 goto sve_imm_offset_vl;
2263
2264 case AARCH64_OPND_SVE_ADDR_RI_U6:
2265 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2266 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2267 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2268 min_value = 0;
2269 max_value = 63;
2270 sve_imm_offset:
2271 assert (!opnd->addr.offset.is_reg);
2272 assert (opnd->addr.preind);
2273 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2274 min_value *= num;
2275 max_value *= num;
2276 if (opnd->shifter.operator_present
2277 || opnd->shifter.amount_present)
2278 {
2279 set_other_error (mismatch_detail, idx,
2280 _("invalid addressing mode"));
2281 return 0;
2282 }
2283 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2284 {
2285 set_offset_out_of_range_error (mismatch_detail, idx,
2286 min_value, max_value);
2287 return 0;
2288 }
2289 if (!value_aligned_p (opnd->addr.offset.imm, num))
2290 {
2291 set_unaligned_error (mismatch_detail, idx, num);
2292 return 0;
2293 }
2294 break;
2295
2296 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2297 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2298 min_value = -8;
2299 max_value = 7;
2300 goto sve_imm_offset;
2301
2302 case AARCH64_OPND_SVE_ADDR_ZX:
2303 /* Everything is already ensured by parse_operands or
2304 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2305 argument type). */
2306 assert (opnd->addr.offset.is_reg);
2307 assert (opnd->addr.preind);
2308 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2309 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2310 assert (opnd->shifter.operator_present == 0);
2311 break;
2312
2313 case AARCH64_OPND_SVE_ADDR_R:
2314 case AARCH64_OPND_SVE_ADDR_RR:
2315 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2316 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2317 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2318 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2319 case AARCH64_OPND_SVE_ADDR_RX:
2320 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2321 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2322 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2323 case AARCH64_OPND_SVE_ADDR_RZ:
2324 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2325 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2326 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2327 modifiers = 1 << AARCH64_MOD_LSL;
2328 sve_rr_operand:
2329 assert (opnd->addr.offset.is_reg);
2330 assert (opnd->addr.preind);
2331 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2332 && opnd->addr.offset.regno == 31)
2333 {
2334 set_other_error (mismatch_detail, idx,
2335 _("index register xzr is not allowed"));
2336 return 0;
2337 }
2338 if (((1 << opnd->shifter.kind) & modifiers) == 0
2339 || (opnd->shifter.amount
2340 != get_operand_specific_data (&aarch64_operands[type])))
2341 {
2342 set_other_error (mismatch_detail, idx,
2343 _("invalid addressing mode"));
2344 return 0;
2345 }
2346 break;
2347
2348 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2349 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2350 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2351 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2352 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2353 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2354 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2355 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2356 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2357 goto sve_rr_operand;
2358
2359 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2360 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2361 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2362 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2363 min_value = 0;
2364 max_value = 31;
2365 goto sve_imm_offset;
2366
2367 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2368 modifiers = 1 << AARCH64_MOD_LSL;
2369 sve_zz_operand:
2370 assert (opnd->addr.offset.is_reg);
2371 assert (opnd->addr.preind);
2372 if (((1 << opnd->shifter.kind) & modifiers) == 0
2373 || opnd->shifter.amount < 0
2374 || opnd->shifter.amount > 3)
2375 {
2376 set_other_error (mismatch_detail, idx,
2377 _("invalid addressing mode"));
2378 return 0;
2379 }
2380 break;
2381
2382 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2383 modifiers = (1 << AARCH64_MOD_SXTW);
2384 goto sve_zz_operand;
2385
2386 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2387 modifiers = 1 << AARCH64_MOD_UXTW;
2388 goto sve_zz_operand;
2389
2390 default:
2391 break;
2392 }
2393 break;
2394
2395 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2396 if (type == AARCH64_OPND_LEt)
2397 {
2398 /* Get the upper bound for the element index. */
2399 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2400 if (!value_in_range_p (opnd->reglist.index, 0, num))
2401 {
2402 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2403 return 0;
2404 }
2405 }
2406 /* The opcode dependent area stores the number of elements in
2407 each structure to be loaded/stored. */
2408 num = get_opcode_dependent_value (opcode);
2409 switch (type)
2410 {
2411 case AARCH64_OPND_LVt:
2412 assert (num >= 1 && num <= 4);
2413 /* Unless LD1/ST1, the number of registers should be equal to that
2414 of the structure elements. */
2415 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2416 return 0;
2417 break;
2418 case AARCH64_OPND_LVt_AL:
2419 case AARCH64_OPND_LEt:
2420 assert (num >= 1 && num <= 4);
2421 /* The number of registers should be equal to that of the structure
2422 elements. */
2423 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2424 return 0;
2425 break;
2426 default:
2427 break;
2428 }
2429 if (opnd->reglist.stride != 1)
2430 {
2431 set_reg_list_stride_error (mismatch_detail, idx, 1);
2432 return 0;
2433 }
2434 break;
2435
2436 case AARCH64_OPND_CLASS_IMMEDIATE:
2437 /* Constraint check on immediate operand. */
2438 imm = opnd->imm.value;
2439 /* E.g. imm_0_31 constrains value to be 0..31. */
2440 if (qualifier_value_in_range_constraint_p (qualifier)
2441 && !value_in_range_p (imm, get_lower_bound (qualifier),
2442 get_upper_bound (qualifier)))
2443 {
2444 set_imm_out_of_range_error (mismatch_detail, idx,
2445 get_lower_bound (qualifier),
2446 get_upper_bound (qualifier));
2447 return 0;
2448 }
2449
2450 switch (type)
2451 {
2452 case AARCH64_OPND_AIMM:
2453 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2454 {
2455 set_other_error (mismatch_detail, idx,
2456 _("invalid shift operator"));
2457 return 0;
2458 }
2459 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2460 {
2461 set_other_error (mismatch_detail, idx,
2462 _("shift amount must be 0 or 12"));
2463 return 0;
2464 }
2465 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2466 {
2467 set_other_error (mismatch_detail, idx,
2468 _("immediate out of range"));
2469 return 0;
2470 }
2471 break;
2472
2473 case AARCH64_OPND_HALF:
2474 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2475 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2476 {
2477 set_other_error (mismatch_detail, idx,
2478 _("invalid shift operator"));
2479 return 0;
2480 }
2481 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2482 if (!value_aligned_p (opnd->shifter.amount, 16))
2483 {
2484 set_other_error (mismatch_detail, idx,
2485 _("shift amount must be a multiple of 16"));
2486 return 0;
2487 }
2488 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2489 {
2490 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2491 0, size * 8 - 16);
2492 return 0;
2493 }
2494 if (opnd->imm.value < 0)
2495 {
2496 set_other_error (mismatch_detail, idx,
2497 _("negative immediate value not allowed"));
2498 return 0;
2499 }
2500 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2501 {
2502 set_other_error (mismatch_detail, idx,
2503 _("immediate out of range"));
2504 return 0;
2505 }
2506 break;
2507
2508 case AARCH64_OPND_IMM_MOV:
2509 {
2510 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2511 imm = opnd->imm.value;
2512 assert (idx == 1);
2513 switch (opcode->op)
2514 {
2515 case OP_MOV_IMM_WIDEN:
2516 imm = ~imm;
2517 /* Fall through. */
2518 case OP_MOV_IMM_WIDE:
2519 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2520 {
2521 set_other_error (mismatch_detail, idx,
2522 _("immediate out of range"));
2523 return 0;
2524 }
2525 break;
2526 case OP_MOV_IMM_LOG:
2527 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2528 {
2529 set_other_error (mismatch_detail, idx,
2530 _("immediate out of range"));
2531 return 0;
2532 }
2533 break;
2534 default:
2535 assert (0);
2536 return 0;
2537 }
2538 }
2539 break;
2540
2541 case AARCH64_OPND_NZCV:
2542 case AARCH64_OPND_CCMP_IMM:
2543 case AARCH64_OPND_EXCEPTION:
2544 case AARCH64_OPND_UNDEFINED:
2545 case AARCH64_OPND_TME_UIMM16:
2546 case AARCH64_OPND_UIMM4:
2547 case AARCH64_OPND_UIMM4_ADDG:
2548 case AARCH64_OPND_UIMM7:
2549 case AARCH64_OPND_UIMM3_OP1:
2550 case AARCH64_OPND_UIMM3_OP2:
2551 case AARCH64_OPND_SVE_UIMM3:
2552 case AARCH64_OPND_SVE_UIMM7:
2553 case AARCH64_OPND_SVE_UIMM8:
2554 case AARCH64_OPND_SVE_UIMM8_53:
2555 case AARCH64_OPND_CSSC_UIMM8:
2556 size = get_operand_fields_width (get_operand_from_code (type));
2557 assert (size < 32);
2558 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2559 {
2560 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2561 (1u << size) - 1);
2562 return 0;
2563 }
2564 break;
2565
2566 case AARCH64_OPND_UIMM10:
2567 /* Scaled unsigned 10 bits immediate offset. */
2568 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2569 {
2570 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2571 return 0;
2572 }
2573
2574 if (!value_aligned_p (opnd->imm.value, 16))
2575 {
2576 set_unaligned_error (mismatch_detail, idx, 16);
2577 return 0;
2578 }
2579 break;
2580
2581 case AARCH64_OPND_SIMM5:
2582 case AARCH64_OPND_SVE_SIMM5:
2583 case AARCH64_OPND_SVE_SIMM5B:
2584 case AARCH64_OPND_SVE_SIMM6:
2585 case AARCH64_OPND_SVE_SIMM8:
2586 case AARCH64_OPND_CSSC_SIMM8:
2587 size = get_operand_fields_width (get_operand_from_code (type));
2588 assert (size < 32);
2589 if (!value_fit_signed_field_p (opnd->imm.value, size))
2590 {
2591 set_imm_out_of_range_error (mismatch_detail, idx,
2592 -(1 << (size - 1)),
2593 (1 << (size - 1)) - 1);
2594 return 0;
2595 }
2596 break;
2597
2598 case AARCH64_OPND_WIDTH:
2599 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2600 && opnds[0].type == AARCH64_OPND_Rd);
2601 size = get_upper_bound (qualifier);
2602 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2603 /* lsb+width <= reg.size */
2604 {
2605 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2606 size - opnds[idx-1].imm.value);
2607 return 0;
2608 }
2609 break;
2610
2611 case AARCH64_OPND_LIMM:
2612 case AARCH64_OPND_SVE_LIMM:
2613 {
2614 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2615 uint64_t uimm = opnd->imm.value;
2616 if (opcode->op == OP_BIC)
2617 uimm = ~uimm;
2618 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2619 {
2620 set_other_error (mismatch_detail, idx,
2621 _("immediate out of range"));
2622 return 0;
2623 }
2624 }
2625 break;
2626
2627 case AARCH64_OPND_IMM0:
2628 case AARCH64_OPND_FPIMM0:
2629 if (opnd->imm.value != 0)
2630 {
2631 set_other_error (mismatch_detail, idx,
2632 _("immediate zero expected"));
2633 return 0;
2634 }
2635 break;
2636
2637 case AARCH64_OPND_IMM_ROT1:
2638 case AARCH64_OPND_IMM_ROT2:
2639 case AARCH64_OPND_SVE_IMM_ROT2:
2640 if (opnd->imm.value != 0
2641 && opnd->imm.value != 90
2642 && opnd->imm.value != 180
2643 && opnd->imm.value != 270)
2644 {
2645 set_other_error (mismatch_detail, idx,
2646 _("rotate expected to be 0, 90, 180 or 270"));
2647 return 0;
2648 }
2649 break;
2650
2651 case AARCH64_OPND_IMM_ROT3:
2652 case AARCH64_OPND_SVE_IMM_ROT1:
2653 case AARCH64_OPND_SVE_IMM_ROT3:
2654 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2655 {
2656 set_other_error (mismatch_detail, idx,
2657 _("rotate expected to be 90 or 270"));
2658 return 0;
2659 }
2660 break;
2661
2662 case AARCH64_OPND_SHLL_IMM:
2663 assert (idx == 2);
2664 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2665 if (opnd->imm.value != size)
2666 {
2667 set_other_error (mismatch_detail, idx,
2668 _("invalid shift amount"));
2669 return 0;
2670 }
2671 break;
2672
2673 case AARCH64_OPND_IMM_VLSL:
2674 size = aarch64_get_qualifier_esize (qualifier);
2675 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2676 {
2677 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2678 size * 8 - 1);
2679 return 0;
2680 }
2681 break;
2682
2683 case AARCH64_OPND_IMM_VLSR:
2684 size = aarch64_get_qualifier_esize (qualifier);
2685 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2686 {
2687 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2688 return 0;
2689 }
2690 break;
2691
2692 case AARCH64_OPND_SIMD_IMM:
2693 case AARCH64_OPND_SIMD_IMM_SFT:
2694 /* Qualifier check. */
2695 switch (qualifier)
2696 {
2697 case AARCH64_OPND_QLF_LSL:
2698 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2699 {
2700 set_other_error (mismatch_detail, idx,
2701 _("invalid shift operator"));
2702 return 0;
2703 }
2704 break;
2705 case AARCH64_OPND_QLF_MSL:
2706 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2707 {
2708 set_other_error (mismatch_detail, idx,
2709 _("invalid shift operator"));
2710 return 0;
2711 }
2712 break;
2713 case AARCH64_OPND_QLF_NIL:
2714 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2715 {
2716 set_other_error (mismatch_detail, idx,
2717 _("shift is not permitted"));
2718 return 0;
2719 }
2720 break;
2721 default:
2722 assert (0);
2723 return 0;
2724 }
2725 /* Is the immediate valid? */
2726 assert (idx == 1);
2727 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2728 {
2729 /* uimm8 or simm8 */
2730 if (!value_in_range_p (opnd->imm.value, -128, 255))
2731 {
2732 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2733 return 0;
2734 }
2735 }
2736 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2737 {
2738 /* uimm64 is not
2739 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2740 ffffffffgggggggghhhhhhhh'. */
2741 set_other_error (mismatch_detail, idx,
2742 _("invalid value for immediate"));
2743 return 0;
2744 }
2745 /* Is the shift amount valid? */
2746 switch (opnd->shifter.kind)
2747 {
2748 case AARCH64_MOD_LSL:
2749 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2750 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2751 {
2752 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2753 (size - 1) * 8);
2754 return 0;
2755 }
2756 if (!value_aligned_p (opnd->shifter.amount, 8))
2757 {
2758 set_unaligned_error (mismatch_detail, idx, 8);
2759 return 0;
2760 }
2761 break;
2762 case AARCH64_MOD_MSL:
2763 /* Only 8 and 16 are valid shift amount. */
2764 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2765 {
2766 set_other_error (mismatch_detail, idx,
2767 _("shift amount must be 0 or 16"));
2768 return 0;
2769 }
2770 break;
2771 default:
2772 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2773 {
2774 set_other_error (mismatch_detail, idx,
2775 _("invalid shift operator"));
2776 return 0;
2777 }
2778 break;
2779 }
2780 break;
2781
2782 case AARCH64_OPND_FPIMM:
2783 case AARCH64_OPND_SIMD_FPIMM:
2784 case AARCH64_OPND_SVE_FPIMM8:
2785 if (opnd->imm.is_fp == 0)
2786 {
2787 set_other_error (mismatch_detail, idx,
2788 _("floating-point immediate expected"));
2789 return 0;
2790 }
2791 /* The value is expected to be an 8-bit floating-point constant with
2792 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2793 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2794 instruction). */
2795 if (!value_in_range_p (opnd->imm.value, 0, 255))
2796 {
2797 set_other_error (mismatch_detail, idx,
2798 _("immediate out of range"));
2799 return 0;
2800 }
2801 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2802 {
2803 set_other_error (mismatch_detail, idx,
2804 _("invalid shift operator"));
2805 return 0;
2806 }
2807 break;
2808
2809 case AARCH64_OPND_SVE_AIMM:
2810 min_value = 0;
2811 sve_aimm:
2812 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2813 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2814 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2815 uvalue = opnd->imm.value;
2816 shift = opnd->shifter.amount;
2817 if (size == 1)
2818 {
2819 if (shift != 0)
2820 {
2821 set_other_error (mismatch_detail, idx,
2822 _("no shift amount allowed for"
2823 " 8-bit constants"));
2824 return 0;
2825 }
2826 }
2827 else
2828 {
2829 if (shift != 0 && shift != 8)
2830 {
2831 set_other_error (mismatch_detail, idx,
2832 _("shift amount must be 0 or 8"));
2833 return 0;
2834 }
2835 if (shift == 0 && (uvalue & 0xff) == 0)
2836 {
2837 shift = 8;
2838 uvalue = (int64_t) uvalue / 256;
2839 }
2840 }
2841 mask >>= shift;
2842 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2843 {
2844 set_other_error (mismatch_detail, idx,
2845 _("immediate too big for element size"));
2846 return 0;
2847 }
2848 uvalue = (uvalue - min_value) & mask;
2849 if (uvalue > 0xff)
2850 {
2851 set_other_error (mismatch_detail, idx,
2852 _("invalid arithmetic immediate"));
2853 return 0;
2854 }
2855 break;
2856
2857 case AARCH64_OPND_SVE_ASIMM:
2858 min_value = -128;
2859 goto sve_aimm;
2860
2861 case AARCH64_OPND_SVE_I1_HALF_ONE:
2862 assert (opnd->imm.is_fp);
2863 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2864 {
2865 set_other_error (mismatch_detail, idx,
2866 _("floating-point value must be 0.5 or 1.0"));
2867 return 0;
2868 }
2869 break;
2870
2871 case AARCH64_OPND_SVE_I1_HALF_TWO:
2872 assert (opnd->imm.is_fp);
2873 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2874 {
2875 set_other_error (mismatch_detail, idx,
2876 _("floating-point value must be 0.5 or 2.0"));
2877 return 0;
2878 }
2879 break;
2880
2881 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2882 assert (opnd->imm.is_fp);
2883 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2884 {
2885 set_other_error (mismatch_detail, idx,
2886 _("floating-point value must be 0.0 or 1.0"));
2887 return 0;
2888 }
2889 break;
2890
2891 case AARCH64_OPND_SVE_INV_LIMM:
2892 {
2893 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2894 uint64_t uimm = ~opnd->imm.value;
2895 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2896 {
2897 set_other_error (mismatch_detail, idx,
2898 _("immediate out of range"));
2899 return 0;
2900 }
2901 }
2902 break;
2903
2904 case AARCH64_OPND_SVE_LIMM_MOV:
2905 {
2906 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2907 uint64_t uimm = opnd->imm.value;
2908 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2909 {
2910 set_other_error (mismatch_detail, idx,
2911 _("immediate out of range"));
2912 return 0;
2913 }
2914 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2915 {
2916 set_other_error (mismatch_detail, idx,
2917 _("invalid replicated MOV immediate"));
2918 return 0;
2919 }
2920 }
2921 break;
2922
2923 case AARCH64_OPND_SVE_PATTERN_SCALED:
2924 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2925 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2926 {
2927 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2928 return 0;
2929 }
2930 break;
2931
2932 case AARCH64_OPND_SVE_SHLIMM_PRED:
2933 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2934 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2935 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2936 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2937 {
2938 set_imm_out_of_range_error (mismatch_detail, idx,
2939 0, 8 * size - 1);
2940 return 0;
2941 }
2942 break;
2943
2944 case AARCH64_OPND_SME_SHRIMM4:
2945 size = 1 << get_operand_fields_width (get_operand_from_code (type));
2946 if (!value_in_range_p (opnd->imm.value, 1, size))
2947 {
2948 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
2949 return 0;
2950 }
2951 break;
2952
2953 case AARCH64_OPND_SME_SHRIMM5:
2954 case AARCH64_OPND_SVE_SHRIMM_PRED:
2955 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2956 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2957 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2958 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2959 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2960 {
2961 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2962 return 0;
2963 }
2964 break;
2965
2966 case AARCH64_OPND_SME_ZT0_INDEX:
2967 if (!value_in_range_p (opnd->imm.value, 0, 56))
2968 {
2969 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
2970 return 0;
2971 }
2972 if (opnd->imm.value % 8 != 0)
2973 {
2974 set_other_error (mismatch_detail, idx,
2975 _("byte index must be a multiple of 8"));
2976 return 0;
2977 }
2978 break;
2979
2980 default:
2981 break;
2982 }
2983 break;
2984
2985 case AARCH64_OPND_CLASS_SYSTEM:
2986 switch (type)
2987 {
2988 case AARCH64_OPND_PSTATEFIELD:
2989 for (i = 0; aarch64_pstatefields[i].name; ++i)
2990 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2991 break;
2992 assert (aarch64_pstatefields[i].name);
2993 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2994 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2995 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2996 {
2997 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2998 return 0;
2999 }
3000 break;
3001 case AARCH64_OPND_PRFOP:
3002 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3003 {
3004 set_other_error (mismatch_detail, idx,
3005 _("the register-index form of PRFM does"
3006 " not accept opcodes in the range 24-31"));
3007 return 0;
3008 }
3009 break;
3010 default:
3011 break;
3012 }
3013 break;
3014
3015 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3016 /* Get the upper bound for the element index. */
3017 if (opcode->op == OP_FCMLA_ELEM)
3018 /* FCMLA index range depends on the vector size of other operands
 3019 	 and is halved because complex numbers take two elements.  */
3020 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3021 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3022 else
3023 num = 16;
3024 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3025 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3026
3027 /* Index out-of-range. */
3028 if (!value_in_range_p (opnd->reglane.index, 0, num))
3029 {
3030 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3031 return 0;
3032 }
3033 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3034 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3035 number is encoded in "size:M:Rm":
3036 size <Vm>
3037 00 RESERVED
3038 01 0:Rm
3039 10 M:Rm
3040 11 RESERVED */
3041 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
3042 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3043 {
3044 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3045 return 0;
3046 }
3047 break;
3048
3049 case AARCH64_OPND_CLASS_MODIFIED_REG:
3050 assert (idx == 1 || idx == 2);
3051 switch (type)
3052 {
3053 case AARCH64_OPND_Rm_EXT:
3054 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3055 && opnd->shifter.kind != AARCH64_MOD_LSL)
3056 {
3057 set_other_error (mismatch_detail, idx,
3058 _("extend operator expected"));
3059 return 0;
3060 }
3061 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3062 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3063 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3064 case. */
3065 if (!aarch64_stack_pointer_p (opnds + 0)
3066 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3067 {
3068 if (!opnd->shifter.operator_present)
3069 {
3070 set_other_error (mismatch_detail, idx,
3071 _("missing extend operator"));
3072 return 0;
3073 }
3074 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3075 {
3076 set_other_error (mismatch_detail, idx,
3077 _("'LSL' operator not allowed"));
3078 return 0;
3079 }
3080 }
3081 assert (opnd->shifter.operator_present /* Default to LSL. */
3082 || opnd->shifter.kind == AARCH64_MOD_LSL);
3083 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3084 {
3085 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3086 return 0;
3087 }
3088 /* In the 64-bit form, the final register operand is written as Wm
3089 for all but the (possibly omitted) UXTX/LSL and SXTX
3090 operators.
3091 N.B. GAS allows X register to be used with any operator as a
3092 programming convenience. */
3093 if (qualifier == AARCH64_OPND_QLF_X
3094 && opnd->shifter.kind != AARCH64_MOD_LSL
3095 && opnd->shifter.kind != AARCH64_MOD_UXTX
3096 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3097 {
3098 set_other_error (mismatch_detail, idx, _("W register expected"));
3099 return 0;
3100 }
3101 break;
3102
3103 case AARCH64_OPND_Rm_SFT:
3104 /* ROR is not available to the shifted register operand in
3105 arithmetic instructions. */
3106 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3107 {
3108 set_other_error (mismatch_detail, idx,
3109 _("shift operator expected"));
3110 return 0;
3111 }
3112 if (opnd->shifter.kind == AARCH64_MOD_ROR
3113 && opcode->iclass != log_shift)
3114 {
3115 set_other_error (mismatch_detail, idx,
3116 _("'ROR' operator not allowed"));
3117 return 0;
3118 }
3119 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3120 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3121 {
3122 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3123 return 0;
3124 }
3125 break;
3126
3127 default:
3128 break;
3129 }
3130 break;
3131
3132 default:
3133 break;
3134 }
3135
3136 return 1;
3137 }
3138
3139 /* Main entrypoint for the operand constraint checking.
3140
3141 Return 1 if operands of *INST meet the constraint applied by the operand
3142 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3143 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3144 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3145 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3146 error kind when it is notified that an instruction does not pass the check).
3147
3148 Un-determined operand qualifiers may get established during the process. */
3149
int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* A positive TIED_OPERAND value I means operand I must "match" operand 0;
     what "match" means depends on the iclass (see below).  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
        {
        /* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.
	*/
        case sme_ldr:
        case sme_str:
          assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
          assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
          /* Operand 0 carries its immediate in the ZA index; operand 1
             carries it in the address offset.  */
          if (inst->operands[0].indexed_za.index.imm
              != inst->operands[1].addr.offset.imm)
            {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
		  mismatch_detail->index = i;
		}
	      return 0;
	    }
	  break;

        default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class1
	       = aarch64_get_operand_class (inst->operands[0].type);
	    enum aarch64_operand_class op_class2
	       = aarch64_get_operand_class (inst->operands[i].type);
	    assert (op_class1 == op_class2);
	    /* For SVE register lists, "same register" means the whole list
	       shape must agree: first register, count and stride.  For
	       anything else a plain register-number comparison suffices.  */
	    if (op_class1 == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return 0;
	      }
	    break;
	  }
        }
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will carried out by operand_general_constraint_met_p,
     which has be to called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     are enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands flagged as SKIP are not fully decoded/assembled yet, so
	 cannot be checked here.  */
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
3271
3272 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3273 Also updates the TYPE of each INST->OPERANDS with the corresponding
3274 value of OPCODE->OPERANDS.
3275
3276 Note that some operand qualifiers may need to be manually cleared by
3277 the caller before it further calls the aarch64_opcode_encode; by
3278 doing this, it helps the qualifier matching facilities work
3279 properly. */
3280
3281 const aarch64_opcode*
3282 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3283 {
3284 int i;
3285 const aarch64_opcode *old = inst->opcode;
3286
3287 inst->opcode = opcode;
3288
3289 /* Update the operand types. */
3290 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3291 {
3292 inst->operands[i].type = opcode->operands[i];
3293 if (opcode->operands[i] == AARCH64_OPND_NIL)
3294 break;
3295 }
3296
3297 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3298
3299 return old;
3300 }
3301
3302 int
3303 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3304 {
3305 int i;
3306 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3307 if (operands[i] == operand)
3308 return i;
3309 else if (operands[i] == AARCH64_OPND_NIL)
3310 break;
3311 return -1;
3312 }
3313 \f
/* R0...R30, followed by FOR31.  Expands the register-name macro R once
   per register number, so the same macro builds each 32-entry bank.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General register name table.  First index: 0 = banks where register 31
   is the stack pointer, 1 = banks where it is the zero register.  Second
   index: 0 = 32-bit (Wn) names, 1 = 64-bit (Xn) names.
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3344
3345 /* Return the integer register name.
3346 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3347
3348 static inline const char *
3349 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3350 {
3351 const int has_zr = sp_reg_p ? 0 : 1;
3352 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3353 return int_reg[has_zr][is_64][regno];
3354 }
3355
3356 /* Like get_int_reg_name, but IS_64 is always 1. */
3357
3358 static inline const char *
3359 get_64bit_int_reg_name (int regno, int sp_reg_p)
3360 {
3361 const int has_zr = sp_reg_p ? 0 : 1;
3362 return int_reg[has_zr][1][regno];
3363 }
3364
3365 /* Get the name of the integer offset register in OPND, using the shift type
3366 to decide whether it's a word or doubleword. */
3367
3368 static inline const char *
3369 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3370 {
3371 switch (opnd->shifter.kind)
3372 {
3373 case AARCH64_MOD_UXTW:
3374 case AARCH64_MOD_SXTW:
3375 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3376
3377 case AARCH64_MOD_LSL:
3378 case AARCH64_MOD_SXTX:
3379 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3380
3381 default:
3382 abort ();
3383 }
3384 }
3385
3386 /* Get the name of the SVE vector offset register in OPND, using the operand
3387 qualifier to decide whether the suffix should be .S or .D. */
3388
3389 static inline const char *
3390 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3391 {
3392 assert (qualifier == AARCH64_OPND_QLF_S_S
3393 || qualifier == AARCH64_OPND_QLF_S_D);
3394 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3395 }
3396
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the bit pattern produced by expand_fp_imm be viewed
   as a floating-point number without violating strict aliasing.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), hence the 32-bit members here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
3416
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  /* Decompose IMM8 into the fields used by the expansion.  */
  uint32_t sign = (imm8 >> 7) & 0x01;		/* imm8<7> */
  uint32_t low7 = imm8 & 0x7f;			/* imm8<6:0> */
  uint32_t bit6 = low7 >> 6;			/* imm8<6> */
  uint32_t bit6_x4 = bit6 * 0xf;		/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Assemble the top 32 bits of the double pattern, then shift them
	 into place; the low word is zero.  */
      result = (sign << (63 - 32))		/* imm8<7> */
	| ((bit6 ^ 1) << (62 - 32))		/* NOT(imm8<6>) */
	| (bit6_x4 << (58 - 32))
	| (bit6 << (57 - 32))
	| (bit6 << (56 - 32))
	| (bit6 << (55 - 32))			/* Replicate(imm8<6>,7) */
	| (low7 << (48 - 32));			/* imm8<6>:imm8<5:0> */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Half-precision is deliberately expanded with the single-precision
	 layout.  */
      result = (sign << 31)			/* imm8<7> */
	| ((bit6 ^ 1) << 30)			/* NOT(imm8<6>) */
	| (bit6_x4 << 26)			/* Replicate(imm8<6>,4) */
	| (low7 << 19);				/* imm8<6>:imm8<5:0> */
    }
  else
    /* An unsupported size.  */
    assert (0);

  return result;
}
3460
3461 /* Return a string based on FMT with the register style applied. */
3462
3463 static const char *
3464 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3465 {
3466 const char *txt;
3467 va_list ap;
3468
3469 va_start (ap, fmt);
3470 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3471 va_end (ap);
3472
3473 return txt;
3474 }
3475
3476 /* Return a string based on FMT with the immediate style applied. */
3477
3478 static const char *
3479 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3480 {
3481 const char *txt;
3482 va_list ap;
3483
3484 va_start (ap, fmt);
3485 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3486 va_end (ap);
3487
3488 return txt;
3489 }
3490
3491 /* Return a string based on FMT with the sub-mnemonic style applied. */
3492
3493 static const char *
3494 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3495 {
3496 const char *txt;
3497 va_list ap;
3498
3499 va_start (ap, fmt);
3500 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3501 va_end (ap);
3502
3503 return txt;
3504 }
3505
3506 /* Return a string based on FMT with the address style applied. */
3507
3508 static const char *
3509 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3510 {
3511 const char *txt;
3512 va_list ap;
3513
3514 va_start (ap, fmt);
3515 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3516 va_end (ap);
3517
3518 return txt;
3519 }
3520
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers (prefix "p") wrap at 16; vector registers at 32.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Comma-separated form: compute up to four register numbers, each
	 wrapped modulo the bank size.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3592
3593 /* Print the register+immediate address in OPND to BUF, which has SIZE
3594 characters. BASE is the name of the base register. */
3595
3596 static void
3597 print_immediate_offset_address (char *buf, size_t size,
3598 const aarch64_opnd_info *opnd,
3599 const char *base,
3600 struct aarch64_styler *styler)
3601 {
3602 if (opnd->addr.writeback)
3603 {
3604 if (opnd->addr.preind)
3605 {
3606 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3607 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3608 else
3609 snprintf (buf, size, "[%s, %s]!",
3610 style_reg (styler, base),
3611 style_imm (styler, "#%d", opnd->addr.offset.imm));
3612 }
3613 else
3614 snprintf (buf, size, "[%s], %s",
3615 style_reg (styler, base),
3616 style_imm (styler, "#%d", opnd->addr.offset.imm));
3617 }
3618 else
3619 {
3620 if (opnd->shifter.operator_present)
3621 {
3622 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3623 snprintf (buf, size, "[%s, %s, %s]",
3624 style_reg (styler, base),
3625 style_imm (styler, "#%d", opnd->addr.offset.imm),
3626 style_sub_mnem (styler, "mul vl"));
3627 }
3628 else if (opnd->addr.offset.imm)
3629 snprintf (buf, size, "[%s, %s]",
3630 style_reg (styler, base),
3631 style_imm (styler, "#%d", opnd->addr.offset.imm));
3632 else
3633 snprintf (buf, size, "[%s]", style_reg (styler, base));
3634 }
3635 }
3636
3637 /* Produce the string representation of the register offset address operand
3638 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3639 the names of the base and offset registers. */
3640 static void
3641 print_register_offset_address (char *buf, size_t size,
3642 const aarch64_opnd_info *opnd,
3643 const char *base, const char *offset,
3644 struct aarch64_styler *styler)
3645 {
3646 char tb[32]; /* Temporary buffer. */
3647 bool print_extend_p = true;
3648 bool print_amount_p = true;
3649 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3650
3651 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3652 || !opnd->shifter.amount_present))
3653 {
3654 /* Not print the shift/extend amount when the amount is zero and
3655 when it is not the special case of 8-bit load/store instruction. */
3656 print_amount_p = false;
3657 /* Likewise, no need to print the shift operator LSL in such a
3658 situation. */
3659 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3660 print_extend_p = false;
3661 }
3662
3663 /* Prepare for the extend/shift. */
3664 if (print_extend_p)
3665 {
3666 if (print_amount_p)
3667 snprintf (tb, sizeof (tb), ", %s %s",
3668 style_sub_mnem (styler, shift_name),
3669 style_imm (styler, "#%" PRIi64,
3670 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3671 (opnd->shifter.amount % 100)));
3672 else
3673 snprintf (tb, sizeof (tb), ", %s",
3674 style_sub_mnem (styler, shift_name));
3675 }
3676 else
3677 tb[0] = '\0';
3678
3679 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3680 style_reg (styler, offset), tb);
3681 }
3682
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.

   BUF must be at least one byte long; output is truncated to SIZE.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names and their coverage masks, ordered so that the entries
     covering more tiles come first; the greedy scan below therefore
     emits the shortest list of names for MASK.  */
  const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
                        "za1.s", "za2.s", "za3.s", "za0.d",
                        "za1.d", "za2.d", "za3.d", "za4.d",
                        "za5.d", "za6.d", "za7.d", " " };
  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
                        0x22, 0x44, 0x88, 0x01,
                        0x02, 0x04, 0x08, 0x10,
                        0x20, 0x40, 0x80, 0x00 };
  int i, k;
  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);

  k = snprintf (buf, size, "{");
  for (i = 0; i < ZAN_SIZE; i++)
    {
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  mask &= ~zan_v[i];
	  /* snprintf returns the length the output WOULD have had, which
	     can exceed the space actually written.  Clamp K so that
	     SIZE - K (a size_t) never underflows and hands a bogus huge
	     bound to the next snprintf, overrunning BUF.  */
	  if (k < 0 || (size_t) k >= size)
	    k = (int) (size - 1);
	  if (k > 1)
	    k += snprintf (buf + k, size - k, ", ");
	  if (k < 0 || (size_t) k >= size)
	    k = (int) (size - 1);
	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
	}
      /* Stop early once every requested tile has been named.  */
      if (mask == 0)
	break;
    }
  if (k < 0 || (size_t) k >= size)
    k = (int) (size - 1);
  snprintf (buf + k, size - k, "}");
}
3723
3724 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3725 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3726 PC, PCREL_P and ADDRESS are used to pass in and return information about
3727 the PC-relative address calculation, where the PC value is passed in
3728 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3729 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3730 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3731
3732 The function serves both the disassembler and the assembler diagnostics
3733 issuer, which is the reason why it lives in this file. */
3734
3735 void
3736 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3737 const aarch64_opcode *opcode,
3738 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3739 bfd_vma *address, char** notes,
3740 char *comment, size_t comment_size,
3741 aarch64_feature_set features,
3742 struct aarch64_styler *styler)
3743 {
3744 unsigned int i, num_conds;
3745 const char *name = NULL;
3746 const aarch64_opnd_info *opnd = opnds + idx;
3747 enum aarch64_modifier_kind kind;
3748 uint64_t addr, enum_value;
3749
3750 if (comment != NULL)
3751 {
3752 assert (comment_size > 0);
3753 comment[0] = '\0';
3754 }
3755 else
3756 assert (comment_size == 0);
3757
3758 buf[0] = '\0';
3759 if (pcrel_p)
3760 *pcrel_p = 0;
3761
3762 switch (opnd->type)
3763 {
3764 case AARCH64_OPND_Rd:
3765 case AARCH64_OPND_Rn:
3766 case AARCH64_OPND_Rm:
3767 case AARCH64_OPND_Rt:
3768 case AARCH64_OPND_Rt2:
3769 case AARCH64_OPND_Rs:
3770 case AARCH64_OPND_Ra:
3771 case AARCH64_OPND_Rt_LS64:
3772 case AARCH64_OPND_Rt_SYS:
3773 case AARCH64_OPND_PAIRREG:
3774 case AARCH64_OPND_SVE_Rm:
3775 case AARCH64_OPND_LSE128_Rt:
3776 case AARCH64_OPND_LSE128_Rt2:
3777 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3778 the <ic_op>, therefore we use opnd->present to override the
3779 generic optional-ness information. */
3780 if (opnd->type == AARCH64_OPND_Rt_SYS)
3781 {
3782 if (!opnd->present)
3783 break;
3784 }
3785 /* Omit the operand, e.g. RET. */
3786 else if (optional_operand_p (opcode, idx)
3787 && (opnd->reg.regno
3788 == get_optional_operand_default_value (opcode)))
3789 break;
3790 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3791 || opnd->qualifier == AARCH64_OPND_QLF_X);
3792 snprintf (buf, size, "%s",
3793 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3794 opnd->qualifier, 0)));
3795 break;
3796
3797 case AARCH64_OPND_Rd_SP:
3798 case AARCH64_OPND_Rn_SP:
3799 case AARCH64_OPND_Rt_SP:
3800 case AARCH64_OPND_SVE_Rn_SP:
3801 case AARCH64_OPND_Rm_SP:
3802 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3803 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3804 || opnd->qualifier == AARCH64_OPND_QLF_X
3805 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3806 snprintf (buf, size, "%s",
3807 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3808 opnd->qualifier, 1)));
3809 break;
3810
3811 case AARCH64_OPND_Rm_EXT:
3812 kind = opnd->shifter.kind;
3813 assert (idx == 1 || idx == 2);
3814 if ((aarch64_stack_pointer_p (opnds)
3815 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3816 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3817 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3818 && kind == AARCH64_MOD_UXTW)
3819 || (opnd->qualifier == AARCH64_OPND_QLF_X
3820 && kind == AARCH64_MOD_UXTX)))
3821 {
3822 /* 'LSL' is the preferred form in this case. */
3823 kind = AARCH64_MOD_LSL;
3824 if (opnd->shifter.amount == 0)
3825 {
3826 /* Shifter omitted. */
3827 snprintf (buf, size, "%s",
3828 style_reg (styler,
3829 get_int_reg_name (opnd->reg.regno,
3830 opnd->qualifier, 0)));
3831 break;
3832 }
3833 }
3834 if (opnd->shifter.amount)
3835 snprintf (buf, size, "%s, %s %s",
3836 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3837 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3838 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3839 else
3840 snprintf (buf, size, "%s, %s",
3841 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3842 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3843 break;
3844
3845 case AARCH64_OPND_Rm_SFT:
3846 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3847 || opnd->qualifier == AARCH64_OPND_QLF_X);
3848 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3849 snprintf (buf, size, "%s",
3850 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3851 opnd->qualifier, 0)));
3852 else
3853 snprintf (buf, size, "%s, %s %s",
3854 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3855 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3856 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3857 break;
3858
3859 case AARCH64_OPND_Fd:
3860 case AARCH64_OPND_Fn:
3861 case AARCH64_OPND_Fm:
3862 case AARCH64_OPND_Fa:
3863 case AARCH64_OPND_Ft:
3864 case AARCH64_OPND_Ft2:
3865 case AARCH64_OPND_Sd:
3866 case AARCH64_OPND_Sn:
3867 case AARCH64_OPND_Sm:
3868 case AARCH64_OPND_SVE_VZn:
3869 case AARCH64_OPND_SVE_Vd:
3870 case AARCH64_OPND_SVE_Vm:
3871 case AARCH64_OPND_SVE_Vn:
3872 snprintf (buf, size, "%s",
3873 style_reg (styler, "%s%d",
3874 aarch64_get_qualifier_name (opnd->qualifier),
3875 opnd->reg.regno));
3876 break;
3877
3878 case AARCH64_OPND_Va:
3879 case AARCH64_OPND_Vd:
3880 case AARCH64_OPND_Vn:
3881 case AARCH64_OPND_Vm:
3882 snprintf (buf, size, "%s",
3883 style_reg (styler, "v%d.%s", opnd->reg.regno,
3884 aarch64_get_qualifier_name (opnd->qualifier)));
3885 break;
3886
3887 case AARCH64_OPND_Ed:
3888 case AARCH64_OPND_En:
3889 case AARCH64_OPND_Em:
3890 case AARCH64_OPND_Em16:
3891 case AARCH64_OPND_SM3_IMM2:
3892 snprintf (buf, size, "%s[%s]",
3893 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3894 aarch64_get_qualifier_name (opnd->qualifier)),
3895 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3896 break;
3897
3898 case AARCH64_OPND_VdD1:
3899 case AARCH64_OPND_VnD1:
3900 snprintf (buf, size, "%s[%s]",
3901 style_reg (styler, "v%d.d", opnd->reg.regno),
3902 style_imm (styler, "1"));
3903 break;
3904
3905 case AARCH64_OPND_LVn:
3906 case AARCH64_OPND_LVt:
3907 case AARCH64_OPND_LVt_AL:
3908 case AARCH64_OPND_LEt:
3909 print_register_list (buf, size, opnd, "v", styler);
3910 break;
3911
3912 case AARCH64_OPND_SVE_Pd:
3913 case AARCH64_OPND_SVE_Pg3:
3914 case AARCH64_OPND_SVE_Pg4_5:
3915 case AARCH64_OPND_SVE_Pg4_10:
3916 case AARCH64_OPND_SVE_Pg4_16:
3917 case AARCH64_OPND_SVE_Pm:
3918 case AARCH64_OPND_SVE_Pn:
3919 case AARCH64_OPND_SVE_Pt:
3920 case AARCH64_OPND_SME_Pm:
3921 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3922 snprintf (buf, size, "%s",
3923 style_reg (styler, "p%d", opnd->reg.regno));
3924 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3925 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3926 snprintf (buf, size, "%s",
3927 style_reg (styler, "p%d/%s", opnd->reg.regno,
3928 aarch64_get_qualifier_name (opnd->qualifier)));
3929 else
3930 snprintf (buf, size, "%s",
3931 style_reg (styler, "p%d.%s", opnd->reg.regno,
3932 aarch64_get_qualifier_name (opnd->qualifier)));
3933 break;
3934
3935 case AARCH64_OPND_SVE_PNd:
3936 case AARCH64_OPND_SVE_PNg4_10:
3937 case AARCH64_OPND_SVE_PNn:
3938 case AARCH64_OPND_SVE_PNt:
3939 case AARCH64_OPND_SME_PNd3:
3940 case AARCH64_OPND_SME_PNg3:
3941 case AARCH64_OPND_SME_PNn:
3942 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3943 snprintf (buf, size, "%s",
3944 style_reg (styler, "pn%d", opnd->reg.regno));
3945 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3946 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3947 snprintf (buf, size, "%s",
3948 style_reg (styler, "pn%d/%s", opnd->reg.regno,
3949 aarch64_get_qualifier_name (opnd->qualifier)));
3950 else
3951 snprintf (buf, size, "%s",
3952 style_reg (styler, "pn%d.%s", opnd->reg.regno,
3953 aarch64_get_qualifier_name (opnd->qualifier)));
3954 break;
3955
3956 case AARCH64_OPND_SME_Pdx2:
3957 case AARCH64_OPND_SME_PdxN:
3958 print_register_list (buf, size, opnd, "p", styler);
3959 break;
3960
3961 case AARCH64_OPND_SME_PNn3_INDEX1:
3962 case AARCH64_OPND_SME_PNn3_INDEX2:
3963 snprintf (buf, size, "%s[%s]",
3964 style_reg (styler, "pn%d", opnd->reglane.regno),
3965 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3966 break;
3967
3968 case AARCH64_OPND_SVE_Za_5:
3969 case AARCH64_OPND_SVE_Za_16:
3970 case AARCH64_OPND_SVE_Zd:
3971 case AARCH64_OPND_SVE_Zm_5:
3972 case AARCH64_OPND_SVE_Zm_16:
3973 case AARCH64_OPND_SVE_Zn:
3974 case AARCH64_OPND_SVE_Zt:
3975 case AARCH64_OPND_SME_Zm:
3976 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3977 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3978 else
3979 snprintf (buf, size, "%s",
3980 style_reg (styler, "z%d.%s", opnd->reg.regno,
3981 aarch64_get_qualifier_name (opnd->qualifier)));
3982 break;
3983
3984 case AARCH64_OPND_SVE_ZnxN:
3985 case AARCH64_OPND_SVE_ZtxN:
3986 case AARCH64_OPND_SME_Zdnx2:
3987 case AARCH64_OPND_SME_Zdnx4:
3988 case AARCH64_OPND_SME_Zmx2:
3989 case AARCH64_OPND_SME_Zmx4:
3990 case AARCH64_OPND_SME_Znx2:
3991 case AARCH64_OPND_SME_Znx4:
3992 case AARCH64_OPND_SME_Ztx2_STRIDED:
3993 case AARCH64_OPND_SME_Ztx4_STRIDED:
3994 print_register_list (buf, size, opnd, "z", styler);
3995 break;
3996
3997 case AARCH64_OPND_SVE_Zm3_INDEX:
3998 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3999 case AARCH64_OPND_SVE_Zm3_19_INDEX:
4000 case AARCH64_OPND_SVE_Zm3_11_INDEX:
4001 case AARCH64_OPND_SVE_Zm4_11_INDEX:
4002 case AARCH64_OPND_SVE_Zm4_INDEX:
4003 case AARCH64_OPND_SVE_Zn_INDEX:
4004 case AARCH64_OPND_SME_Zm_INDEX1:
4005 case AARCH64_OPND_SME_Zm_INDEX2:
4006 case AARCH64_OPND_SME_Zm_INDEX3_1:
4007 case AARCH64_OPND_SME_Zm_INDEX3_2:
4008 case AARCH64_OPND_SME_Zm_INDEX3_10:
4009 case AARCH64_OPND_SME_Zm_INDEX4_1:
4010 case AARCH64_OPND_SME_Zm_INDEX4_10:
4011 case AARCH64_OPND_SME_Zn_INDEX1_16:
4012 case AARCH64_OPND_SME_Zn_INDEX2_15:
4013 case AARCH64_OPND_SME_Zn_INDEX2_16:
4014 case AARCH64_OPND_SME_Zn_INDEX3_14:
4015 case AARCH64_OPND_SME_Zn_INDEX3_15:
4016 case AARCH64_OPND_SME_Zn_INDEX4_14:
4017 snprintf (buf, size, "%s[%s]",
4018 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4019 ? style_reg (styler, "z%d", opnd->reglane.regno)
4020 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4021 aarch64_get_qualifier_name (opnd->qualifier))),
4022 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4023 break;
4024
4025 case AARCH64_OPND_SME_ZAda_2b:
4026 case AARCH64_OPND_SME_ZAda_3b:
4027 snprintf (buf, size, "%s",
4028 style_reg (styler, "za%d.%s", opnd->reg.regno,
4029 aarch64_get_qualifier_name (opnd->qualifier)));
4030 break;
4031
4032 case AARCH64_OPND_SME_ZA_HV_idx_src:
4033 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4034 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4035 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4036 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4037 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4038 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4039 style_reg (styler, "za%d%c.%s",
4040 opnd->indexed_za.regno,
4041 opnd->indexed_za.v == 1 ? 'v' : 'h',
4042 aarch64_get_qualifier_name (opnd->qualifier)),
4043 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4044 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4045 opnd->indexed_za.index.countm1 ? ":" : "",
4046 (opnd->indexed_za.index.countm1
4047 ? style_imm (styler, "%d",
4048 opnd->indexed_za.index.imm
4049 + opnd->indexed_za.index.countm1)
4050 : ""),
4051 opnd->indexed_za.group_size ? ", " : "",
4052 opnd->indexed_za.group_size == 2
4053 ? style_sub_mnem (styler, "vgx2")
4054 : opnd->indexed_za.group_size == 4
4055 ? style_sub_mnem (styler, "vgx4") : "",
4056 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4057 break;
4058
4059 case AARCH64_OPND_SME_list_of_64bit_tiles:
4060 print_sme_za_list (buf, size, opnd->reg.regno, styler);
4061 break;
4062
4063 case AARCH64_OPND_SME_ZA_array_off1x4:
4064 case AARCH64_OPND_SME_ZA_array_off2x2:
4065 case AARCH64_OPND_SME_ZA_array_off2x4:
4066 case AARCH64_OPND_SME_ZA_array_off3_0:
4067 case AARCH64_OPND_SME_ZA_array_off3_5:
4068 case AARCH64_OPND_SME_ZA_array_off3x2:
4069 case AARCH64_OPND_SME_ZA_array_off4:
4070 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4071 style_reg (styler, "za%s%s",
4072 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4073 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4074 ? ""
4075 : aarch64_get_qualifier_name (opnd->qualifier))),
4076 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4077 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4078 opnd->indexed_za.index.countm1 ? ":" : "",
4079 (opnd->indexed_za.index.countm1
4080 ? style_imm (styler, "%d",
4081 opnd->indexed_za.index.imm
4082 + opnd->indexed_za.index.countm1)
4083 : ""),
4084 opnd->indexed_za.group_size ? ", " : "",
4085 opnd->indexed_za.group_size == 2
4086 ? style_sub_mnem (styler, "vgx2")
4087 : opnd->indexed_za.group_size == 4
4088 ? style_sub_mnem (styler, "vgx4") : "");
4089 break;
4090
4091 case AARCH64_OPND_SME_SM_ZA:
4092 snprintf (buf, size, "%s",
4093 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4094 break;
4095
4096 case AARCH64_OPND_SME_PnT_Wm_imm:
4097 snprintf (buf, size, "%s[%s, %s]",
4098 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4099 aarch64_get_qualifier_name (opnd->qualifier)),
4100 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4101 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4102 break;
4103
4104 case AARCH64_OPND_SME_VLxN_10:
4105 case AARCH64_OPND_SME_VLxN_13:
4106 enum_value = opnd->imm.value;
4107 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4108 snprintf (buf, size, "%s",
4109 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4110 break;
4111
4112 case AARCH64_OPND_CRn:
4113 case AARCH64_OPND_CRm:
4114 snprintf (buf, size, "%s",
4115 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4116 break;
4117
4118 case AARCH64_OPND_IDX:
4119 case AARCH64_OPND_MASK:
4120 case AARCH64_OPND_IMM:
4121 case AARCH64_OPND_IMM_2:
4122 case AARCH64_OPND_WIDTH:
4123 case AARCH64_OPND_UIMM3_OP1:
4124 case AARCH64_OPND_UIMM3_OP2:
4125 case AARCH64_OPND_BIT_NUM:
4126 case AARCH64_OPND_IMM_VLSL:
4127 case AARCH64_OPND_IMM_VLSR:
4128 case AARCH64_OPND_SHLL_IMM:
4129 case AARCH64_OPND_IMM0:
4130 case AARCH64_OPND_IMMR:
4131 case AARCH64_OPND_IMMS:
4132 case AARCH64_OPND_UNDEFINED:
4133 case AARCH64_OPND_FBITS:
4134 case AARCH64_OPND_TME_UIMM16:
4135 case AARCH64_OPND_SIMM5:
4136 case AARCH64_OPND_SME_SHRIMM4:
4137 case AARCH64_OPND_SME_SHRIMM5:
4138 case AARCH64_OPND_SVE_SHLIMM_PRED:
4139 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4140 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4141 case AARCH64_OPND_SVE_SHRIMM_PRED:
4142 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4143 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4144 case AARCH64_OPND_SVE_SIMM5:
4145 case AARCH64_OPND_SVE_SIMM5B:
4146 case AARCH64_OPND_SVE_SIMM6:
4147 case AARCH64_OPND_SVE_SIMM8:
4148 case AARCH64_OPND_SVE_UIMM3:
4149 case AARCH64_OPND_SVE_UIMM7:
4150 case AARCH64_OPND_SVE_UIMM8:
4151 case AARCH64_OPND_SVE_UIMM8_53:
4152 case AARCH64_OPND_IMM_ROT1:
4153 case AARCH64_OPND_IMM_ROT2:
4154 case AARCH64_OPND_IMM_ROT3:
4155 case AARCH64_OPND_SVE_IMM_ROT1:
4156 case AARCH64_OPND_SVE_IMM_ROT2:
4157 case AARCH64_OPND_SVE_IMM_ROT3:
4158 case AARCH64_OPND_CSSC_SIMM8:
4159 case AARCH64_OPND_CSSC_UIMM8:
4160 snprintf (buf, size, "%s",
4161 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4162 break;
4163
4164 case AARCH64_OPND_SVE_I1_HALF_ONE:
4165 case AARCH64_OPND_SVE_I1_HALF_TWO:
4166 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4167 {
4168 single_conv_t c;
4169 c.i = opnd->imm.value;
4170 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4171 break;
4172 }
4173
4174 case AARCH64_OPND_SVE_PATTERN:
4175 if (optional_operand_p (opcode, idx)
4176 && opnd->imm.value == get_optional_operand_default_value (opcode))
4177 break;
4178 enum_value = opnd->imm.value;
4179 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4180 if (aarch64_sve_pattern_array[enum_value])
4181 snprintf (buf, size, "%s",
4182 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4183 else
4184 snprintf (buf, size, "%s",
4185 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4186 break;
4187
4188 case AARCH64_OPND_SVE_PATTERN_SCALED:
4189 if (optional_operand_p (opcode, idx)
4190 && !opnd->shifter.operator_present
4191 && opnd->imm.value == get_optional_operand_default_value (opcode))
4192 break;
4193 enum_value = opnd->imm.value;
4194 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4195 if (aarch64_sve_pattern_array[opnd->imm.value])
4196 snprintf (buf, size, "%s",
4197 style_reg (styler,
4198 aarch64_sve_pattern_array[opnd->imm.value]));
4199 else
4200 snprintf (buf, size, "%s",
4201 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4202 if (opnd->shifter.operator_present)
4203 {
4204 size_t len = strlen (buf);
4205 const char *shift_name
4206 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4207 snprintf (buf + len, size - len, ", %s %s",
4208 style_sub_mnem (styler, shift_name),
4209 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4210 }
4211 break;
4212
4213 case AARCH64_OPND_SVE_PRFOP:
4214 enum_value = opnd->imm.value;
4215 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4216 if (aarch64_sve_prfop_array[enum_value])
4217 snprintf (buf, size, "%s",
4218 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4219 else
4220 snprintf (buf, size, "%s",
4221 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4222 break;
4223
4224 case AARCH64_OPND_IMM_MOV:
4225 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4226 {
4227 case 4: /* e.g. MOV Wd, #<imm32>. */
4228 {
4229 int imm32 = opnd->imm.value;
4230 snprintf (buf, size, "%s",
4231 style_imm (styler, "#0x%-20x", imm32));
4232 snprintf (comment, comment_size, "#%d", imm32);
4233 }
4234 break;
4235 case 8: /* e.g. MOV Xd, #<imm64>. */
4236 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4237 opnd->imm.value));
4238 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4239 break;
4240 default:
4241 snprintf (buf, size, "<invalid>");
4242 break;
4243 }
4244 break;
4245
4246 case AARCH64_OPND_FPIMM0:
4247 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4248 break;
4249
4250 case AARCH64_OPND_LIMM:
4251 case AARCH64_OPND_AIMM:
4252 case AARCH64_OPND_HALF:
4253 case AARCH64_OPND_SVE_INV_LIMM:
4254 case AARCH64_OPND_SVE_LIMM:
4255 case AARCH64_OPND_SVE_LIMM_MOV:
4256 if (opnd->shifter.amount)
4257 snprintf (buf, size, "%s, %s %s",
4258 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4259 style_sub_mnem (styler, "lsl"),
4260 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4261 else
4262 snprintf (buf, size, "%s",
4263 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4264 break;
4265
4266 case AARCH64_OPND_SIMD_IMM:
4267 case AARCH64_OPND_SIMD_IMM_SFT:
4268 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4269 || opnd->shifter.kind == AARCH64_MOD_NONE)
4270 snprintf (buf, size, "%s",
4271 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4272 else
4273 snprintf (buf, size, "%s, %s %s",
4274 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4275 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4276 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4277 break;
4278
4279 case AARCH64_OPND_SVE_AIMM:
4280 case AARCH64_OPND_SVE_ASIMM:
4281 if (opnd->shifter.amount)
4282 snprintf (buf, size, "%s, %s %s",
4283 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4284 style_sub_mnem (styler, "lsl"),
4285 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4286 else
4287 snprintf (buf, size, "%s",
4288 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4289 break;
4290
4291 case AARCH64_OPND_FPIMM:
4292 case AARCH64_OPND_SIMD_FPIMM:
4293 case AARCH64_OPND_SVE_FPIMM8:
4294 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4295 {
4296 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4297 {
4298 half_conv_t c;
4299 c.i = expand_fp_imm (2, opnd->imm.value);
4300 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4301 }
4302 break;
4303 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4304 {
4305 single_conv_t c;
4306 c.i = expand_fp_imm (4, opnd->imm.value);
4307 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4308 }
4309 break;
4310 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4311 {
4312 double_conv_t c;
4313 c.i = expand_fp_imm (8, opnd->imm.value);
4314 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4315 }
4316 break;
4317 default:
4318 snprintf (buf, size, "<invalid>");
4319 break;
4320 }
4321 break;
4322
4323 case AARCH64_OPND_CCMP_IMM:
4324 case AARCH64_OPND_NZCV:
4325 case AARCH64_OPND_EXCEPTION:
4326 case AARCH64_OPND_UIMM4:
4327 case AARCH64_OPND_UIMM4_ADDG:
4328 case AARCH64_OPND_UIMM7:
4329 case AARCH64_OPND_UIMM10:
4330 if (optional_operand_p (opcode, idx)
4331 && (opnd->imm.value ==
4332 (int64_t) get_optional_operand_default_value (opcode)))
4333 /* Omit the operand, e.g. DCPS1. */
4334 break;
4335 snprintf (buf, size, "%s",
4336 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4337 break;
4338
4339 case AARCH64_OPND_COND:
4340 case AARCH64_OPND_COND1:
4341 snprintf (buf, size, "%s",
4342 style_sub_mnem (styler, opnd->cond->names[0]));
4343 num_conds = ARRAY_SIZE (opnd->cond->names);
4344 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4345 {
4346 size_t len = comment != NULL ? strlen (comment) : 0;
4347 if (i == 1)
4348 snprintf (comment + len, comment_size - len, "%s = %s",
4349 opnd->cond->names[0], opnd->cond->names[i]);
4350 else
4351 snprintf (comment + len, comment_size - len, ", %s",
4352 opnd->cond->names[i]);
4353 }
4354 break;
4355
4356 case AARCH64_OPND_ADDR_ADRP:
4357 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4358 + opnd->imm.value;
4359 if (pcrel_p)
4360 *pcrel_p = 1;
4361 if (address)
4362 *address = addr;
4363 /* This is not necessary during the disassembling, as print_address_func
4364 in the disassemble_info will take care of the printing. But some
4365 other callers may be still interested in getting the string in *STR,
4366 so here we do snprintf regardless. */
4367 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4368 break;
4369
4370 case AARCH64_OPND_ADDR_PCREL14:
4371 case AARCH64_OPND_ADDR_PCREL19:
4372 case AARCH64_OPND_ADDR_PCREL21:
4373 case AARCH64_OPND_ADDR_PCREL26:
4374 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4375 if (pcrel_p)
4376 *pcrel_p = 1;
4377 if (address)
4378 *address = addr;
4379 /* This is not necessary during the disassembling, as print_address_func
4380 in the disassemble_info will take care of the printing. But some
4381 other callers may be still interested in getting the string in *STR,
4382 so here we do snprintf regardless. */
4383 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4384 break;
4385
4386 case AARCH64_OPND_ADDR_SIMPLE:
4387 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4388 case AARCH64_OPND_SIMD_ADDR_POST:
4389 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4390 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4391 {
4392 if (opnd->addr.offset.is_reg)
4393 snprintf (buf, size, "[%s], %s",
4394 style_reg (styler, name),
4395 style_reg (styler, "x%d", opnd->addr.offset.regno));
4396 else
4397 snprintf (buf, size, "[%s], %s",
4398 style_reg (styler, name),
4399 style_imm (styler, "#%d", opnd->addr.offset.imm));
4400 }
4401 else
4402 snprintf (buf, size, "[%s]", style_reg (styler, name));
4403 break;
4404
4405 case AARCH64_OPND_ADDR_REGOFF:
4406 case AARCH64_OPND_SVE_ADDR_R:
4407 case AARCH64_OPND_SVE_ADDR_RR:
4408 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4409 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4410 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4411 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4412 case AARCH64_OPND_SVE_ADDR_RX:
4413 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4414 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4415 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4416 print_register_offset_address
4417 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4418 get_offset_int_reg_name (opnd), styler);
4419 break;
4420
4421 case AARCH64_OPND_SVE_ADDR_ZX:
4422 print_register_offset_address
4423 (buf, size, opnd,
4424 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4425 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4426 break;
4427
4428 case AARCH64_OPND_SVE_ADDR_RZ:
4429 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4430 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4431 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4432 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4433 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4434 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4435 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4436 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4437 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4438 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4439 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4440 print_register_offset_address
4441 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4442 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4443 styler);
4444 break;
4445
4446 case AARCH64_OPND_ADDR_SIMM7:
4447 case AARCH64_OPND_ADDR_SIMM9:
4448 case AARCH64_OPND_ADDR_SIMM9_2:
4449 case AARCH64_OPND_ADDR_SIMM10:
4450 case AARCH64_OPND_ADDR_SIMM11:
4451 case AARCH64_OPND_ADDR_SIMM13:
4452 case AARCH64_OPND_ADDR_OFFSET:
4453 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4454 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4455 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4456 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4457 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4458 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4459 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4460 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4461 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4462 case AARCH64_OPND_SVE_ADDR_RI_U6:
4463 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4464 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4465 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4466 print_immediate_offset_address
4467 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4468 styler);
4469 break;
4470
4471 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4472 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4473 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4474 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4475 print_immediate_offset_address
4476 (buf, size, opnd,
4477 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4478 styler);
4479 break;
4480
4481 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4482 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4483 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4484 print_register_offset_address
4485 (buf, size, opnd,
4486 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4487 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4488 styler);
4489 break;
4490
4491 case AARCH64_OPND_ADDR_UIMM12:
4492 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4493 if (opnd->addr.offset.imm)
4494 snprintf (buf, size, "[%s, %s]",
4495 style_reg (styler, name),
4496 style_imm (styler, "#%d", opnd->addr.offset.imm));
4497 else
4498 snprintf (buf, size, "[%s]", style_reg (styler, name));
4499 break;
4500
4501 case AARCH64_OPND_SYSREG:
4502 for (i = 0; aarch64_sys_regs[i].name; ++i)
4503 {
4504 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4505
4506 bool exact_match
4507 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4508 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4509 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4510
4511 /* Try and find an exact match, But if that fails, return the first
4512 partial match that was found. */
4513 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4514 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4515 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4516 && (name == NULL || exact_match))
4517 {
4518 name = aarch64_sys_regs[i].name;
4519 if (exact_match)
4520 {
4521 if (notes)
4522 *notes = NULL;
4523 break;
4524 }
4525
4526 /* If we didn't match exactly, that means the presense of a flag
4527 indicates what we didn't want for this instruction. e.g. If
4528 F_REG_READ is there, that means we were looking for a write
4529 register. See aarch64_ext_sysreg. */
4530 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4531 *notes = _("reading from a write-only register");
4532 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4533 *notes = _("writing to a read-only register");
4534 }
4535 }
4536
4537 if (name)
4538 snprintf (buf, size, "%s", style_reg (styler, name));
4539 else
4540 {
4541 /* Implementation defined system register. */
4542 unsigned int value = opnd->sysreg.value;
4543 snprintf (buf, size, "%s",
4544 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4545 (value >> 14) & 0x3, (value >> 11) & 0x7,
4546 (value >> 7) & 0xf, (value >> 3) & 0xf,
4547 value & 0x7));
4548 }
4549 break;
4550
4551 case AARCH64_OPND_PSTATEFIELD:
4552 for (i = 0; aarch64_pstatefields[i].name; ++i)
4553 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4554 {
4555 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4556 SVCRZA and SVCRSMZA. */
4557 uint32_t flags = aarch64_pstatefields[i].flags;
4558 if (flags & F_REG_IN_CRM
4559 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4560 != PSTATE_DECODE_CRM (flags)))
4561 continue;
4562 break;
4563 }
4564 assert (aarch64_pstatefields[i].name);
4565 snprintf (buf, size, "%s",
4566 style_reg (styler, aarch64_pstatefields[i].name));
4567 break;
4568
4569 case AARCH64_OPND_SYSREG_AT:
4570 case AARCH64_OPND_SYSREG_DC:
4571 case AARCH64_OPND_SYSREG_IC:
4572 case AARCH64_OPND_SYSREG_TLBI:
4573 case AARCH64_OPND_SYSREG_SR:
4574 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4575 break;
4576
4577 case AARCH64_OPND_BARRIER:
4578 case AARCH64_OPND_BARRIER_DSB_NXS:
4579 {
4580 if (opnd->barrier->name[0] == '#')
4581 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4582 else
4583 snprintf (buf, size, "%s",
4584 style_sub_mnem (styler, opnd->barrier->name));
4585 }
4586 break;
4587
4588 case AARCH64_OPND_BARRIER_ISB:
4589 /* Operand can be omitted, e.g. in DCPS1. */
4590 if (! optional_operand_p (opcode, idx)
4591 || (opnd->barrier->value
4592 != get_optional_operand_default_value (opcode)))
4593 snprintf (buf, size, "%s",
4594 style_imm (styler, "#0x%x", opnd->barrier->value));
4595 break;
4596
4597 case AARCH64_OPND_PRFOP:
4598 if (opnd->prfop->name != NULL)
4599 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4600 else
4601 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4602 opnd->prfop->value));
4603 break;
4604
4605 case AARCH64_OPND_RPRFMOP:
4606 enum_value = opnd->imm.value;
4607 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
4608 && aarch64_rprfmop_array[enum_value])
4609 snprintf (buf, size, "%s",
4610 style_reg (styler, aarch64_rprfmop_array[enum_value]));
4611 else
4612 snprintf (buf, size, "%s",
4613 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4614 break;
4615
4616 case AARCH64_OPND_BARRIER_PSB:
4617 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4618 break;
4619
4620 case AARCH64_OPND_X16:
4621 snprintf (buf, size, "%s", style_reg (styler, "x16"));
4622 break;
4623
4624 case AARCH64_OPND_SME_ZT0:
4625 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4626 break;
4627
4628 case AARCH64_OPND_SME_ZT0_INDEX:
4629 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4630 style_imm (styler, "%d", (int) opnd->imm.value));
4631 break;
4632
4633 case AARCH64_OPND_SME_ZT0_LIST:
4634 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4635 break;
4636
4637 case AARCH64_OPND_BARRIER_GCSB:
4638 snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
4639 break;
4640
4641 case AARCH64_OPND_BTI_TARGET:
4642 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4643 snprintf (buf, size, "%s",
4644 style_sub_mnem (styler, opnd->hint_option->name));
4645 break;
4646
4647 case AARCH64_OPND_MOPS_ADDR_Rd:
4648 case AARCH64_OPND_MOPS_ADDR_Rs:
4649 snprintf (buf, size, "[%s]!",
4650 style_reg (styler,
4651 get_int_reg_name (opnd->reg.regno,
4652 AARCH64_OPND_QLF_X, 0)));
4653 break;
4654
4655 case AARCH64_OPND_MOPS_WB_Rn:
4656 snprintf (buf, size, "%s!",
4657 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4658 AARCH64_OPND_QLF_X, 0)));
4659 break;
4660
4661 default:
4662 snprintf (buf, size, "<invalid>");
4663 break;
4664 }
4665 }
4666 \f
/* Pack a system-register / system-instruction operand encoding from its
   five fields (op0, op1, CRn, CRm, op2).  The fields are shifted into a
   single word and the final >> 5 drops the op2 shift so the result is in
   the compact form used by the tables below.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* Shorthand for 3.9.3 "Instructions for Accessing Special Purpose
   Registers": op0 == 3, CRn == 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* Shorthand for 3.9.10 "System Instructions": op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable names for the CRn/CRm register numbers 0..15 used in the
   encoding tables below.  */
#define C0 0
#define C1 1
#define C2 2
#define C3 3
#define C4 4
#define C5 5
#define C6 6
#define C7 7
#define C8 8
#define C9 9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
4690
/* Table of named system registers, expanded from aarch64-sys-regs.def
   and terminated by a null-name sentinel entry.

   TODO: one remaining issue needs to be resolved:
   1. handle CPU-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these is set then the register is
   read-write.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
4704
4705 bool
4706 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4707 {
4708 return (reg_flags & F_DEPRECATED) != 0;
4709 }
4710
4711 bool
4712 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
4713 {
4714 return (reg_flags & F_REG_ALIAS) != 0;
4715 }
4716
/* Table of PSTATE fields for MSR (immediate), terminated by a null-name
   entry.

   Note that the values below are NOT in CPENC form; they are in op2:op1
   form.  The fields are encoded by ins_pstatefield, which simply shifts
   the value by the width of the fields in a loop, so if you CPENC them
   only the first value will be set and the rest are masked out to 0.
   For example, with op2 = 3 and op1 = 2, CPENC would produce
   0b110000000001000000 (0x30040) while the required value is
   0b011010 (0x1a).

   The SVCRSM/SVCRZA/SVCRSMZA entries additionally encode part of the
   field name in CRm[3:1] via PSTATE_ENCODE_CRM_AND_IMM.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
4743
4744 bool
4745 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4746 const aarch64_sys_reg *reg)
4747 {
4748 if (!(reg->flags & F_ARCHEXT))
4749 return true;
4750
4751 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4752 }
4753
/* Instruction-cache maintenance operations (IC), in CPENS
   (op1, CRn, CRm, op2) encoding.  F_HASXT marks operations that take an
   <Xt> register operand.  Terminated by a null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
  { "ialluis", CPENS(0,C7,C1,0), 0 },
  { "iallu", CPENS(0,C7,C5,0), 0 },
  { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
};
4761
/* Data-cache maintenance operations (DC), in CPENS (op1, CRn, CRm, op2)
   encoding.  F_HASXT marks operations that take an <Xt> register
   operand; F_ARCHEXT marks those gated on an architecture extension
   (see aarch64_sys_ins_reg_supported_p).  Terminated by a null-name
   entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
  { "zva", CPENS (3, C7, C4, 1), F_HASXT },
  { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
  { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
  { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
  { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
  { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
  { "isw", CPENS (0, C7, C6, 2), F_HASXT },
  { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
  { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
  { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
  { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
  { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
  { "csw", CPENS (0, C7, C10, 2), F_HASXT },
  { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
  { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
  { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
  { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
  { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
  { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
  { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
  { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
  { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
  { "civac", CPENS (3, C7, C14, 1), F_HASXT },
  { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
  { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
  { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
  { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
  { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
  { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
  { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
};
4796
/* Address-translation operations (AT), in CPENS (op1, CRn, CRm, op2)
   encoding.  All take an <Xt> register operand (F_HASXT); F_ARCHEXT
   marks those gated on an architecture extension.  Terminated by a
   null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
  { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
  { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
  { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
  { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
  { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
  { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
  { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
  { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
  { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
  { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
  { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
  { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
  { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
  { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
  { 0, CPENS(0,0,0,0), 0 }
};
4815
/* TLB maintenance operations (TLBI), in CPENS (op1, CRn, CRm, op2)
   encoding.  F_HASXT marks operations that take an <Xt> register
   operand; F_ARCHEXT marks those gated on an architecture extension
   (see aarch64_sys_ins_reg_supported_p).  Terminated by a null-name
   entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
  { "vmalle1", CPENS(0,C8,C7,0), 0 },
  { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
  { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
  { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
  { "vmalle1is", CPENS(0,C8,C3,0), 0 },
  { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
  { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
  { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
  { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
  { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
  { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
  { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
  { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
  { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
  { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
  { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
  { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
  { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
  { "alle2", CPENS(4,C8,C7,0), 0 },
  { "alle2is", CPENS(4,C8,C3,0), 0 },
  { "alle1", CPENS(4,C8,C7,4), 0 },
  { "alle1is", CPENS(4,C8,C3,4), 0 },
  { "alle3", CPENS(6,C8,C7,0), 0 },
  { "alle3is", CPENS(6,C8,C3,0), 0 },
  { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
  { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
  { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
  { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
  { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
  { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
  { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
  { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

  /* Outer-shareable variants.  */
  { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
  { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
  { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
  { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
  { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
  { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
  { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
  { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
  { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
  { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

  /* Range ("r"-prefixed) variants.  */
  { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
  { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
  { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
  { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
  { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
  { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
  { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
  { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
  { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
  { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
  { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
  { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
  { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
  { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

  { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
  { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
  { "paallos", CPENS (6, C8, C1, 4), 0},
  { "paall", CPENS (6, C8, C7, 4), 0},

  { 0, CPENS(0,0,0,0), 0 }
};
4906
/* Speculation-restriction operations (CFP/DVP/CPP), in CPENS
   (op1, CRn, CRm, op2) encoding.  Terminated by a null-name entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
  { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

  { 0, CPENS(0,0,0,0), 0 }
};
4917
4918 bool
4919 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4920 {
4921 return (sys_ins_reg->flags & F_HASXT) != 0;
4922 }
4923
4924 extern bool
4925 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4926 const char *reg_name,
4927 aarch64_insn reg_value,
4928 uint32_t reg_flags,
4929 const aarch64_feature_set *reg_features)
4930 {
4931 /* Armv8-R has no EL3. */
4932 if (AARCH64_CPU_HAS_FEATURE (features, V8R))
4933 {
4934 const char *suffix = strrchr (reg_name, '_');
4935 if (suffix && !strcmp (suffix, "_el3"))
4936 return false;
4937 }
4938
4939 if (!(reg_flags & F_ARCHEXT))
4940 return true;
4941
4942 if (reg_features
4943 && AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features))
4944 return true;
4945
4946 /* ARMv8.4 TLB instructions. */
4947 if ((reg_value == CPENS (0, C8, C1, 0)
4948 || reg_value == CPENS (0, C8, C1, 1)
4949 || reg_value == CPENS (0, C8, C1, 2)
4950 || reg_value == CPENS (0, C8, C1, 3)
4951 || reg_value == CPENS (0, C8, C1, 5)
4952 || reg_value == CPENS (0, C8, C1, 7)
4953 || reg_value == CPENS (4, C8, C4, 0)
4954 || reg_value == CPENS (4, C8, C4, 4)
4955 || reg_value == CPENS (4, C8, C1, 1)
4956 || reg_value == CPENS (4, C8, C1, 5)
4957 || reg_value == CPENS (4, C8, C1, 6)
4958 || reg_value == CPENS (6, C8, C1, 1)
4959 || reg_value == CPENS (6, C8, C1, 5)
4960 || reg_value == CPENS (4, C8, C1, 0)
4961 || reg_value == CPENS (4, C8, C1, 4)
4962 || reg_value == CPENS (6, C8, C1, 0)
4963 || reg_value == CPENS (0, C8, C6, 1)
4964 || reg_value == CPENS (0, C8, C6, 3)
4965 || reg_value == CPENS (0, C8, C6, 5)
4966 || reg_value == CPENS (0, C8, C6, 7)
4967 || reg_value == CPENS (0, C8, C2, 1)
4968 || reg_value == CPENS (0, C8, C2, 3)
4969 || reg_value == CPENS (0, C8, C2, 5)
4970 || reg_value == CPENS (0, C8, C2, 7)
4971 || reg_value == CPENS (0, C8, C5, 1)
4972 || reg_value == CPENS (0, C8, C5, 3)
4973 || reg_value == CPENS (0, C8, C5, 5)
4974 || reg_value == CPENS (0, C8, C5, 7)
4975 || reg_value == CPENS (4, C8, C0, 2)
4976 || reg_value == CPENS (4, C8, C0, 6)
4977 || reg_value == CPENS (4, C8, C4, 2)
4978 || reg_value == CPENS (4, C8, C4, 6)
4979 || reg_value == CPENS (4, C8, C4, 3)
4980 || reg_value == CPENS (4, C8, C4, 7)
4981 || reg_value == CPENS (4, C8, C6, 1)
4982 || reg_value == CPENS (4, C8, C6, 5)
4983 || reg_value == CPENS (4, C8, C2, 1)
4984 || reg_value == CPENS (4, C8, C2, 5)
4985 || reg_value == CPENS (4, C8, C5, 1)
4986 || reg_value == CPENS (4, C8, C5, 5)
4987 || reg_value == CPENS (6, C8, C6, 1)
4988 || reg_value == CPENS (6, C8, C6, 5)
4989 || reg_value == CPENS (6, C8, C2, 1)
4990 || reg_value == CPENS (6, C8, C2, 5)
4991 || reg_value == CPENS (6, C8, C5, 1)
4992 || reg_value == CPENS (6, C8, C5, 5))
4993 && AARCH64_CPU_HAS_FEATURE (features, V8_4A))
4994 return true;
4995
4996 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4997 if (reg_value == CPENS (3, C7, C12, 1)
4998 && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
4999 return true;
5000
5001 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5002 if (reg_value == CPENS (3, C7, C13, 1)
5003 && AARCH64_CPU_HAS_FEATURE (features, CVADP))
5004 return true;
5005
5006 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5007 if ((reg_value == CPENS (0, C7, C6, 3)
5008 || reg_value == CPENS (0, C7, C6, 4)
5009 || reg_value == CPENS (0, C7, C10, 4)
5010 || reg_value == CPENS (0, C7, C14, 4)
5011 || reg_value == CPENS (3, C7, C10, 3)
5012 || reg_value == CPENS (3, C7, C12, 3)
5013 || reg_value == CPENS (3, C7, C13, 3)
5014 || reg_value == CPENS (3, C7, C14, 3)
5015 || reg_value == CPENS (3, C7, C4, 3)
5016 || reg_value == CPENS (0, C7, C6, 5)
5017 || reg_value == CPENS (0, C7, C6, 6)
5018 || reg_value == CPENS (0, C7, C10, 6)
5019 || reg_value == CPENS (0, C7, C14, 6)
5020 || reg_value == CPENS (3, C7, C10, 5)
5021 || reg_value == CPENS (3, C7, C12, 5)
5022 || reg_value == CPENS (3, C7, C13, 5)
5023 || reg_value == CPENS (3, C7, C14, 5)
5024 || reg_value == CPENS (3, C7, C4, 4))
5025 && AARCH64_CPU_HAS_FEATURE (features, MEMTAG))
5026 return true;
5027
5028 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5029 if ((reg_value == CPENS (0, C7, C9, 0)
5030 || reg_value == CPENS (0, C7, C9, 1))
5031 && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
5032 return true;
5033
5034 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
5035 if (reg_value == CPENS (3, C7, C3, 0)
5036 && AARCH64_CPU_HAS_FEATURE (features, PREDRES))
5037 return true;
5038
5039 if ((reg_value == CPENC (3,0,13,0,3)
5040 || CPENC (3,0,13,0,6))
5041 && AARCH64_CPU_HAS_FEATURE (features, THE))
5042 return true;
5043
5044 return false;
5045 }
5046
/* The C0..C15 CRn/CRm shorthands are only needed by the system register
   tables above; retire them before the generic helpers below.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15
5063
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5066
5067 static enum err_type
5068 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5069 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5070 bool encoding ATTRIBUTE_UNUSED,
5071 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5072 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5073 {
5074 int t = BITS (insn, 4, 0);
5075 int n = BITS (insn, 9, 5);
5076 int t2 = BITS (insn, 14, 10);
5077
5078 if (BIT (insn, 23))
5079 {
5080 /* Write back enabled. */
5081 if ((t == n || t2 == n) && n != 31)
5082 return ERR_UND;
5083 }
5084
5085 if (BIT (insn, 22))
5086 {
5087 /* Load */
5088 if (t == t2)
5089 return ERR_UND;
5090 }
5091
5092 return ERR_OK;
5093 }
5094
5095 /* Verifier for vector by element 3 operands functions where the
5096 conditions `if sz:L == 11 then UNDEFINED` holds. */
5097
5098 static enum err_type
5099 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5100 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5101 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5102 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5103 {
5104 const aarch64_insn undef_pattern = 0x3;
5105 aarch64_insn value;
5106
5107 assert (inst->opcode);
5108 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5109 value = encoding ? inst->value : insn;
5110 assert (value);
5111
5112 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5113 return ERR_UND;
5114
5115 return ERR_OK;
5116 }
5117
5118 /* Check an instruction that takes three register operands and that
5119 requires the register numbers to be distinct from one another. */
5120
5121 static enum err_type
5122 verify_three_different_regs (const struct aarch64_inst *inst,
5123 const aarch64_insn insn ATTRIBUTE_UNUSED,
5124 bfd_vma pc ATTRIBUTE_UNUSED,
5125 bool encoding ATTRIBUTE_UNUSED,
5126 aarch64_operand_error *mismatch_detail
5127 ATTRIBUTE_UNUSED,
5128 aarch64_instr_sequence *insn_sequence
5129 ATTRIBUTE_UNUSED)
5130 {
5131 int rd, rs, rn;
5132
5133 rd = inst->operands[0].reg.regno;
5134 rs = inst->operands[1].reg.regno;
5135 rn = inst->operands[2].reg.regno;
5136 if (rd == rs || rd == rn || rs == rn)
5137 {
5138 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5139 mismatch_detail->error
5140 = _("the three register operands must be distinct from one another");
5141 mismatch_detail->index = -1;
5142 return ERR_UND;
5143 }
5144
5145 return ERR_OK;
5146 }
5147
5148 /* Add INST to the end of INSN_SEQUENCE. */
5149
5150 static void
5151 add_insn_to_sequence (const struct aarch64_inst *inst,
5152 aarch64_instr_sequence *insn_sequence)
5153 {
5154 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5155 }
5156
5157 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5158 If INST is NULL the given insn_sequence is cleared and the sequence is left
5159 uninitialized. */
5160
5161 void
5162 init_insn_sequence (const struct aarch64_inst *inst,
5163 aarch64_instr_sequence *insn_sequence)
5164 {
5165 int num_req_entries = 0;
5166
5167 if (insn_sequence->instr)
5168 {
5169 XDELETE (insn_sequence->instr);
5170 insn_sequence->instr = NULL;
5171 }
5172
5173 /* Handle all the cases here. May need to think of something smarter than
5174 a giant if/else chain if this grows. At that time, a lookup table may be
5175 best. */
5176 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5177 num_req_entries = 1;
5178 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5179 num_req_entries = 2;
5180
5181 insn_sequence->num_added_insns = 0;
5182 insn_sequence->num_allocated_insns = num_req_entries;
5183
5184 if (num_req_entries != 0)
5185 {
5186 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5187 add_insn_to_sequence (inst, insn_sequence);
5188 }
5189 }
5190
5191 /* Subroutine of verify_constraints. Check whether the instruction
5192 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5193 expectations are met. Return true if the check passes, otherwise
5194 describe the problem in MISMATCH_DETAIL.
5195
5196 IS_NEW_SECTION is true if INST is assumed to start a new section.
5197 The other arguments are as for verify_constraints. */
5198
static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* Last instruction added to the open sequence, if any.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* The code below relies on consecutive members of a MOPS P/M/E group
     occupying adjacent aarch64_opcode_table entries, so "opcode - 1" is
     the expected predecessor and "prev_insn->opcode[1]" the expected
     successor -- assumption to confirm against aarch64-tbl.h.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      /* The previous instruction wanted its table successor next, and
	 INST is not it.  */
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* INST must directly follow its table predecessor, within the
	 same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The destination, source and size registers must match those of
	 the preceding instruction in the sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5267
5268 /* This function verifies that the instruction INST adheres to its specified
5269 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5270 returned and MISMATCH_DETAIL contains the reason why verification failed.
5271
5272 The function is called both during assembly and disassembly. If assembling
5273 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5274 and will contain the PC of the current instruction w.r.t to the section.
5275
5276 If ENCODING and PC=0 then you are at a start of a section. The constraints
5277 are verified against the given state insn_sequence which is updated as it
5278 transitions through the verification. */
5279
enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast exit: no constraint on this opcode and no open sequence.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* A sequence-opening instruction arriving while another sequence
	 is still open means the previous one was never closed.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      /* Start tracking the new sequence regardless.  */
      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* When decoding, PC == 0 marks the start of a section.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      /* Keep the sequence alive only for the middle (M) member; any
	 other failure abandons it.  */
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  /* Scan INST's operands: count uses of the movprfx destination
	     register, remember where it was last used, find the governing
	     predicate (if any), and track the widest element size seen.  */
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
5558
5559
/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Bits outside the ESIZE-byte element.  The shift is split in two so
     that ESIZE == 8 does not shift by the full 64-bit width.  */
  uint64_t outside = (uint64_t) -1 << (esize * 4) << (esize * 4);
  int64_t folded = (int64_t) uvalue;

  /* The value must be expressible inside the element, i.e. zero- or
     ones-extended beyond it.  */
  if ((uvalue & ~outside) != uvalue && (uvalue | outside) != uvalue)
    return false;

  /* Narrow to the smallest element size at which the value is just a
     replicated pattern; if it folds all the way down to one byte, DUP
     can always encode it.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      folded = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  folded = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP also accepts a byte immediate shifted left by 8.  */
  if ((folded & 0xff) == 0)
    folded /= 256;
  if (folded >= -128 && folded < 128)
    return false;
  return true;
}
5586
5587 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5588 supports the instruction described by INST. */
5589
5590 bool
5591 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5592 aarch64_inst *inst)
5593 {
5594 if (!inst->opcode->avariant
5595 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5596 return false;
5597
5598 if (inst->opcode->iclass == sme_fp_sd
5599 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5600 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5601 return false;
5602
5603 if (inst->opcode->iclass == sme_int_sd
5604 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5605 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
5606 return false;
5607
5608 return true;
5609 }
5610
5611 /* Include the opcode description table as well as the operand description
5612 table. */
5613 #define VERIFIER(x) verify_##x
5614 #include "aarch64-tbl.h"