1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. Local symbol data structures are created once, when the
102 first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have already been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset (see the illustrative sketch after the #include directives below).
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "elf-bfd.h"
143 #include "bfdlink.h"
144 #include "objalloc.h"
145 #include "elf/aarch64.h"
146 #include "elfxx-aarch64.h"
147 #include "cpu-aarch64.h"
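/* Illustrative sketch, added for exposition and not part of the original
   file: the TLS notes above describe using the LSB of a symbol's GOT
   offset to record that its GOT relocations have already been emitted.
   A consumer of such an offset masks the flag out before use, roughly as
   below; the helper name is hypothetical.  */

static bfd_vma ATTRIBUTE_UNUSED
sketch_strip_got_offset_flag (bfd_vma raw_offset)
{
  /* Bit 0 is the bookkeeping flag; clearing it recovers the real byte
     offset into the GOT.  */
  return raw_offset & ~(bfd_vma) 1;
}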
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
158 #endif
159
160 #if ARCH_SIZE == 32
161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
163 #define HOWTO64(...) EMPTY_HOWTO (0)
164 #define HOWTO32(...) HOWTO (__VA_ARGS__)
165 #define LOG_FILE_ALIGN 2
166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC
168 #endif
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \
209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
221
222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
245
246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
255 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
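/* Illustrative sketch, not part of the original file: how the
   classification macros above might be consulted when scanning
   relocations.  The helper and its out-parameter are hypothetical; the
   macros and bfd_reloc_code_real_type are real.  */

static bool ATTRIBUTE_UNUSED
sketch_classify_tls_reloc (bfd_reloc_code_real_type r_type,
			   bool *uses_tlsdesc)
{
  if (!IS_AARCH64_TLS_RELOC (r_type))
    return false;

  /* Distinguish descriptor-based accesses from traditional GD/IE/LE
     accesses; the descriptor form gets an R_AARCH64_TLSDESC relocation
     on its GOT slot, as described in the notes at the top of the file.  */
  *uses_tlsdesc = IS_AARCH64_TLSDESC_RELOC (r_type);
  return true;
}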
259
260 #define ELIMINATE_COPY_RELOCS 1
261
262 /* Return size of a relocation entry.  HTAB is the bfd's
263 elf_aarch64_link_hash_table.  */
264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
265
266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
268 #define PLT_ENTRY_SIZE (32)
269 #define PLT_SMALL_ENTRY_SIZE (16)
270 #define PLT_TLSDESC_ENTRY_SIZE (32)
271 /* PLT sizes with BTI insn. */
272 #define PLT_BTI_SMALL_ENTRY_SIZE (24)
273 /* PLT sizes with PAC insn. */
274 #define PLT_PAC_SMALL_ENTRY_SIZE (24)
275 /* PLT sizes with BTI and PAC insn. */
276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24)
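/* Illustrative sketch, not part of the original file: choosing between
   the small PLT entry sizes defined above.  The boolean parameters are
   hypothetical stand-ins for whatever link-time state records that BTI
   and/or PAC protected PLTs were requested.  */

static unsigned int ATTRIBUTE_UNUSED
sketch_small_plt_entry_size (bool use_bti, bool use_pac)
{
  if (use_bti && use_pac)
    return PLT_BTI_PAC_SMALL_ENTRY_SIZE;	/* 24 bytes.  */
  if (use_bti)
    return PLT_BTI_SMALL_ENTRY_SIZE;		/* 24 bytes.  */
  if (use_pac)
    return PLT_PAC_SMALL_ENTRY_SIZE;		/* 24 bytes.  */
  return PLT_SMALL_ENTRY_SIZE;			/* 16 bytes.  */
}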
277
278 /* Encoding of the nop instruction. */
279 #define INSN_NOP 0xd503201f
280
281 #define aarch64_compute_jump_table_size(htab) \
282 (((htab)->root.srelplt == NULL) ? 0 \
283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
284
285 /* The first entry in a procedure linkage table looks like this.
286 If the distance between the PLTGOT and the PLT is < 4GB, use
287 these PLT entries.  Note that the dynamic linker gets &PLTGOT[2]
288 in x16 and needs to work out PLTGOT[1] by using an address of
289 [x16,#-GOT_ENTRY_SIZE]. */
290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
291 {
292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
294 #if ARCH_SIZE == 64
295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
297 #else
298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
300 #endif
301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
302 0x1f, 0x20, 0x03, 0xd5, /* nop */
303 0x1f, 0x20, 0x03, 0xd5, /* nop */
304 0x1f, 0x20, 0x03, 0xd5, /* nop */
305 };
306
307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
308 {
309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
312 #if ARCH_SIZE == 64
313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
315 #else
316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
318 #endif
319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 /* A per-function entry in a procedure linkage table looks like this.
325 If the distance between the PLTGOT and the PLT is < 4GB, use
326 these PLT entries.  Use the BTI versions of the PLT entries when BTI is enabled. */
327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
328 {
329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
330 #if ARCH_SIZE == 64
331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
333 #else
334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
336 #endif
337 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
338 };
339
340 static const bfd_byte
341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
342 {
343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
345 #if ARCH_SIZE == 64
346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
348 #else
349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
351 #endif
352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
353 0x1f, 0x20, 0x03, 0xd5, /* nop */
354 };
355
356 static const bfd_byte
357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
358 {
359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
360 #if ARCH_SIZE == 64
361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
363 #else
364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
366 #endif
367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
369 0x1f, 0x20, 0x03, 0xd5, /* nop */
370 };
371
372 static const bfd_byte
373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
374 {
375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
377 #if ARCH_SIZE == 64
378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
380 #else
381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
383 #endif
384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
386 };
387
388 static const bfd_byte
389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
390 {
391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
394 #if ARCH_SIZE == 64
395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
397 #else
398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
400 #endif
401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
402 0x1f, 0x20, 0x03, 0xd5, /* nop */
403 0x1f, 0x20, 0x03, 0xd5, /* nop */
404 };
405
406 static const bfd_byte
407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
408 {
409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
413 #if ARCH_SIZE == 64
414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
416 #else
417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
419 #endif
420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
421 0x1f, 0x20, 0x03, 0xd5, /* nop */
422 };
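/* Illustrative sketch, not part of the original file: PLT stub templates
   like the ones above are copied into the output .plt section and then
   have their ADRP/LDR/ADD immediate fields patched to reference the
   matching GOT slot.  The helper below shows only the copy step and is a
   hypothetical outline, not the real implementation.  */

static void ATTRIBUTE_UNUSED
sketch_install_plt_stub (bfd_byte *plt_contents, bfd_vma plt_offset,
			 const bfd_byte *stub_template, unsigned int size)
{
  /* Place the unpatched template; immediate fixups would follow here.  */
  memcpy (plt_contents + plt_offset, stub_template, size);
}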
423
424 #define elf_info_to_howto elfNN_aarch64_info_to_howto
425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
426
427 #define AARCH64_ELF_ABI_VERSION 0
428
429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
430 #define ALL_ONES (~ (bfd_vma) 0)
431
432 /* Indexed by the bfd internal reloc enumerators.
433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
434 in reloc.c. */
435
436 static reloc_howto_type elfNN_aarch64_howto_table[] =
437 {
438 EMPTY_HOWTO (0),
439
440 /* Basic data relocations. */
441
442 /* Deprecated, but retained for backwards compatibility. */
443 HOWTO64 (R_AARCH64_NULL, /* type */
444 0, /* rightshift */
445 0, /* size */
446 0, /* bitsize */
447 false, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_dont, /* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_AARCH64_NULL", /* name */
452 false, /* partial_inplace */
453 0, /* src_mask */
454 0, /* dst_mask */
455 false), /* pcrel_offset */
456 HOWTO (R_AARCH64_NONE, /* type */
457 0, /* rightshift */
458 0, /* size */
459 0, /* bitsize */
460 false, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_dont, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_NONE", /* name */
465 false, /* partial_inplace */
466 0, /* src_mask */
467 0, /* dst_mask */
468 false), /* pcrel_offset */
469
470 /* .xword: (S+A) */
471 HOWTO64 (AARCH64_R (ABS64), /* type */
472 0, /* rightshift */
473 8, /* size */
474 64, /* bitsize */
475 false, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 AARCH64_R_STR (ABS64), /* name */
480 false, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 false), /* pcrel_offset */
484
485 /* .word: (S+A) */
486 HOWTO (AARCH64_R (ABS32), /* type */
487 0, /* rightshift */
488 4, /* size */
489 32, /* bitsize */
490 false, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_unsigned, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 AARCH64_R_STR (ABS32), /* name */
495 false, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 false), /* pcrel_offset */
499
500 /* .half: (S+A) */
501 HOWTO (AARCH64_R (ABS16), /* type */
502 0, /* rightshift */
503 2, /* size */
504 16, /* bitsize */
505 false, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_unsigned, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 AARCH64_R_STR (ABS16), /* name */
510 false, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 false), /* pcrel_offset */
514
515 /* .xword: (S+A-P) */
516 HOWTO64 (AARCH64_R (PREL64), /* type */
517 0, /* rightshift */
518 8, /* size */
519 64, /* bitsize */
520 true, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 AARCH64_R_STR (PREL64), /* name */
525 false, /* partial_inplace */
526 ALL_ONES, /* src_mask */
527 ALL_ONES, /* dst_mask */
528 true), /* pcrel_offset */
529
530 /* .word: (S+A-P) */
531 HOWTO (AARCH64_R (PREL32), /* type */
532 0, /* rightshift */
533 4, /* size */
534 32, /* bitsize */
535 true, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (PREL32), /* name */
540 false, /* partial_inplace */
541 0xffffffff, /* src_mask */
542 0xffffffff, /* dst_mask */
543 true), /* pcrel_offset */
544
545 /* .half: (S+A-P) */
546 HOWTO (AARCH64_R (PREL16), /* type */
547 0, /* rightshift */
548 2, /* size */
549 16, /* bitsize */
550 true, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (PREL16), /* name */
555 false, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 true), /* pcrel_offset */
559
560 /* Group relocations to create a 16, 32, 48 or 64 bit
561 unsigned data or abs address inline. */
562
563 /* MOVZ: ((S+A) >> 0) & 0xffff */
564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
565 0, /* rightshift */
566 4, /* size */
567 16, /* bitsize */
568 false, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_UABS_G0), /* name */
573 false, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 false), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
580 0, /* rightshift */
581 4, /* size */
582 16, /* bitsize */
583 false, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
588 false, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 false), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 16) & 0xffff */
594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
595 16, /* rightshift */
596 4, /* size */
597 16, /* bitsize */
598 false, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (MOVW_UABS_G1), /* name */
603 false, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 false), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
610 16, /* rightshift */
611 4, /* size */
612 16, /* bitsize */
613 false, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
618 false, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 false), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 32) & 0xffff */
624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
625 32, /* rightshift */
626 4, /* size */
627 16, /* bitsize */
628 false, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (MOVW_UABS_G2), /* name */
633 false, /* partial_inplace */
634 0xffff, /* src_mask */
635 0xffff, /* dst_mask */
636 false), /* pcrel_offset */
637
638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
640 32, /* rightshift */
641 4, /* size */
642 16, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
648 false, /* partial_inplace */
649 0xffff, /* src_mask */
650 0xffff, /* dst_mask */
651 false), /* pcrel_offset */
652
653 /* MOVZ: ((S+A) >> 48) & 0xffff */
654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
655 48, /* rightshift */
656 4, /* size */
657 16, /* bitsize */
658 false, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_unsigned, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (MOVW_UABS_G3), /* name */
663 false, /* partial_inplace */
664 0xffff, /* src_mask */
665 0xffff, /* dst_mask */
666 false), /* pcrel_offset */
667
668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
669 signed data or abs address inline. Will change instruction
670 to MOVN or MOVZ depending on sign of calculated value. */
671
672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
674 0, /* rightshift */
675 4, /* size */
676 17, /* bitsize */
677 false, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (MOVW_SABS_G0), /* name */
682 false, /* partial_inplace */
683 0xffff, /* src_mask */
684 0xffff, /* dst_mask */
685 false), /* pcrel_offset */
686
687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
689 16, /* rightshift */
690 4, /* size */
691 17, /* bitsize */
692 false, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (MOVW_SABS_G1), /* name */
697 false, /* partial_inplace */
698 0xffff, /* src_mask */
699 0xffff, /* dst_mask */
700 false), /* pcrel_offset */
701
702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
704 32, /* rightshift */
705 4, /* size */
706 17, /* bitsize */
707 false, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (MOVW_SABS_G2), /* name */
712 false, /* partial_inplace */
713 0xffff, /* src_mask */
714 0xffff, /* dst_mask */
715 false), /* pcrel_offset */
716
717 /* Group relocations to create a 16, 32, 48 or 64 bit
718 PC relative address inline. */
719
720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */
722 0, /* rightshift */
723 4, /* size */
724 17, /* bitsize */
725 true, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (MOVW_PREL_G0), /* name */
730 false, /* partial_inplace */
731 0xffff, /* src_mask */
732 0xffff, /* dst_mask */
733 true), /* pcrel_offset */
734
735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */
737 0, /* rightshift */
738 4, /* size */
739 16, /* bitsize */
740 true, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */
745 false, /* partial_inplace */
746 0xffff, /* src_mask */
747 0xffff, /* dst_mask */
748 true), /* pcrel_offset */
749
750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */
752 16, /* rightshift */
753 4, /* size */
754 17, /* bitsize */
755 true, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_signed, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (MOVW_PREL_G1), /* name */
760 false, /* partial_inplace */
761 0xffff, /* src_mask */
762 0xffff, /* dst_mask */
763 true), /* pcrel_offset */
764
765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */
767 16, /* rightshift */
768 4, /* size */
769 16, /* bitsize */
770 true, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */
775 false, /* partial_inplace */
776 0xffff, /* src_mask */
777 0xffff, /* dst_mask */
778 true), /* pcrel_offset */
779
780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */
782 32, /* rightshift */
783 4, /* size */
784 17, /* bitsize */
785 true, /* pc_relative */
786 0, /* bitpos */
787 complain_overflow_signed, /* complain_on_overflow */
788 bfd_elf_generic_reloc, /* special_function */
789 AARCH64_R_STR (MOVW_PREL_G2), /* name */
790 false, /* partial_inplace */
791 0xffff, /* src_mask */
792 0xffff, /* dst_mask */
793 true), /* pcrel_offset */
794
795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */
797 32, /* rightshift */
798 4, /* size */
799 16, /* bitsize */
800 true, /* pc_relative */
801 0, /* bitpos */
802 complain_overflow_dont, /* complain_on_overflow */
803 bfd_elf_generic_reloc, /* special_function */
804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */
805 false, /* partial_inplace */
806 0xffff, /* src_mask */
807 0xffff, /* dst_mask */
808 true), /* pcrel_offset */
809
810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */
812 48, /* rightshift */
813 4, /* size */
814 16, /* bitsize */
815 true, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (MOVW_PREL_G3), /* name */
820 false, /* partial_inplace */
821 0xffff, /* src_mask */
822 0xffff, /* dst_mask */
823 true), /* pcrel_offset */
824
825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
826 addresses: PG(x) is (x & ~0xfff). */
827
828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
830 2, /* rightshift */
831 4, /* size */
832 19, /* bitsize */
833 true, /* pc_relative */
834 0, /* bitpos */
835 complain_overflow_signed, /* complain_on_overflow */
836 bfd_elf_generic_reloc, /* special_function */
837 AARCH64_R_STR (LD_PREL_LO19), /* name */
838 false, /* partial_inplace */
839 0x7ffff, /* src_mask */
840 0x7ffff, /* dst_mask */
841 true), /* pcrel_offset */
842
843 /* ADR: (S+A-P) & 0x1fffff */
844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
845 0, /* rightshift */
846 4, /* size */
847 21, /* bitsize */
848 true, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_signed, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (ADR_PREL_LO21), /* name */
853 false, /* partial_inplace */
854 0x1fffff, /* src_mask */
855 0x1fffff, /* dst_mask */
856 true), /* pcrel_offset */
857
858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
860 12, /* rightshift */
861 4, /* size */
862 21, /* bitsize */
863 true, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_signed, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
868 false, /* partial_inplace */
869 0x1fffff, /* src_mask */
870 0x1fffff, /* dst_mask */
871 true), /* pcrel_offset */
872
873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
875 12, /* rightshift */
876 4, /* size */
877 21, /* bitsize */
878 true, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
883 false, /* partial_inplace */
884 0x1fffff, /* src_mask */
885 0x1fffff, /* dst_mask */
886 true), /* pcrel_offset */
887
888 /* ADD: (S+A) & 0xfff [no overflow check] */
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
890 0, /* rightshift */
891 4, /* size */
892 12, /* bitsize */
893 false, /* pc_relative */
894 10, /* bitpos */
895 complain_overflow_dont, /* complain_on_overflow */
896 bfd_elf_generic_reloc, /* special_function */
897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
898 false, /* partial_inplace */
899 0x3ffc00, /* src_mask */
900 0x3ffc00, /* dst_mask */
901 false), /* pcrel_offset */
902
903 /* LD/ST8: (S+A) & 0xfff */
904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
905 0, /* rightshift */
906 4, /* size */
907 12, /* bitsize */
908 false, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
913 false, /* partial_inplace */
914 0xfff, /* src_mask */
915 0xfff, /* dst_mask */
916 false), /* pcrel_offset */
917
918 /* Relocations for control-flow instructions. */
919
920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
921 HOWTO (AARCH64_R (TSTBR14), /* type */
922 2, /* rightshift */
923 4, /* size */
924 14, /* bitsize */
925 true, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_signed, /* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 AARCH64_R_STR (TSTBR14), /* name */
930 false, /* partial_inplace */
931 0x3fff, /* src_mask */
932 0x3fff, /* dst_mask */
933 true), /* pcrel_offset */
934
935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
936 HOWTO (AARCH64_R (CONDBR19), /* type */
937 2, /* rightshift */
938 4, /* size */
939 19, /* bitsize */
940 true, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_signed, /* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 AARCH64_R_STR (CONDBR19), /* name */
945 false, /* partial_inplace */
946 0x7ffff, /* src_mask */
947 0x7ffff, /* dst_mask */
948 true), /* pcrel_offset */
949
950 /* B: ((S+A-P) >> 2) & 0x3ffffff */
951 HOWTO (AARCH64_R (JUMP26), /* type */
952 2, /* rightshift */
953 4, /* size */
954 26, /* bitsize */
955 true, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_signed, /* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 AARCH64_R_STR (JUMP26), /* name */
960 false, /* partial_inplace */
961 0x3ffffff, /* src_mask */
962 0x3ffffff, /* dst_mask */
963 true), /* pcrel_offset */
964
965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
966 HOWTO (AARCH64_R (CALL26), /* type */
967 2, /* rightshift */
968 4, /* size */
969 26, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_signed, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (CALL26), /* name */
975 false, /* partial_inplace */
976 0x3ffffff, /* src_mask */
977 0x3ffffff, /* dst_mask */
978 true), /* pcrel_offset */
979
980 /* LD/ST16: (S+A) & 0xffe */
981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
982 1, /* rightshift */
983 4, /* size */
984 12, /* bitsize */
985 false, /* pc_relative */
986 0, /* bitpos */
987 complain_overflow_dont, /* complain_on_overflow */
988 bfd_elf_generic_reloc, /* special_function */
989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
990 false, /* partial_inplace */
991 0xffe, /* src_mask */
992 0xffe, /* dst_mask */
993 false), /* pcrel_offset */
994
995 /* LD/ST32: (S+A) & 0xffc */
996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
997 2, /* rightshift */
998 4, /* size */
999 12, /* bitsize */
1000 false, /* pc_relative */
1001 0, /* bitpos */
1002 complain_overflow_dont, /* complain_on_overflow */
1003 bfd_elf_generic_reloc, /* special_function */
1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
1005 false, /* partial_inplace */
1006 0xffc, /* src_mask */
1007 0xffc, /* dst_mask */
1008 false), /* pcrel_offset */
1009
1010 /* LD/ST64: (S+A) & 0xff8 */
1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
1012 3, /* rightshift */
1013 4, /* size */
1014 12, /* bitsize */
1015 false, /* pc_relative */
1016 0, /* bitpos */
1017 complain_overflow_dont, /* complain_on_overflow */
1018 bfd_elf_generic_reloc, /* special_function */
1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
1020 false, /* partial_inplace */
1021 0xff8, /* src_mask */
1022 0xff8, /* dst_mask */
1023 false), /* pcrel_offset */
1024
1025 /* LD/ST128: (S+A) & 0xff0 */
1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
1027 4, /* rightshift */
1028 4, /* size */
1029 12, /* bitsize */
1030 false, /* pc_relative */
1031 0, /* bitpos */
1032 complain_overflow_dont, /* complain_on_overflow */
1033 bfd_elf_generic_reloc, /* special_function */
1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
1035 false, /* partial_inplace */
1036 0xff0, /* src_mask */
1037 0xff0, /* dst_mask */
1038 false), /* pcrel_offset */
1039
1040 /* Set a load-literal immediate field to bits
1041 0x1FFFFC of G(S)-P */
1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
1043 2, /* rightshift */
1044 4, /* size */
1045 19, /* bitsize */
1046 true, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_signed, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */
1051 false, /* partial_inplace */
1052 0xffffe0, /* src_mask */
1053 0xffffe0, /* dst_mask */
1054 true), /* pcrel_offset */
1055
1056 /* Get to the page for the GOT entry for the symbol
1057 (G(S) - P) using an ADRP instruction. */
1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
1059 12, /* rightshift */
1060 4, /* size */
1061 21, /* bitsize */
1062 true, /* pc_relative */
1063 0, /* bitpos */
1064 complain_overflow_dont, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
1067 false, /* partial_inplace */
1068 0x1fffff, /* src_mask */
1069 0x1fffff, /* dst_mask */
1070 true), /* pcrel_offset */
1071
1072 /* LD64: GOT offset G(S) & 0xff8 */
1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
1074 3, /* rightshift */
1075 4, /* size */
1076 12, /* bitsize */
1077 false, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont, /* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
1082 false, /* partial_inplace */
1083 0xff8, /* src_mask */
1084 0xff8, /* dst_mask */
1085 false), /* pcrel_offset */
1086
1087 /* LD32: GOT offset G(S) & 0xffc */
1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
1089 2, /* rightshift */
1090 4, /* size */
1091 12, /* bitsize */
1092 false, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
1097 false, /* partial_inplace */
1098 0xffc, /* src_mask */
1099 0xffc, /* dst_mask */
1100 false), /* pcrel_offset */
1101
1102 /* Lower 16 bits of GOT offset for the symbol. */
1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
1104 0, /* rightshift */
1105 4, /* size */
1106 16, /* bitsize */
1107 false, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont, /* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
1112 false, /* partial_inplace */
1113 0xffff, /* src_mask */
1114 0xffff, /* dst_mask */
1115 false), /* pcrel_offset */
1116
1117 /* Higher 16 bits of GOT offset for the symbol. */
1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
1119 16, /* rightshift */
1120 4, /* size */
1121 16, /* bitsize */
1122 false, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
1127 false, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 false), /* pcrel_offset */
1131
1132 /* LD64: GOT offset for the symbol. */
1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
1134 3, /* rightshift */
1135 4, /* size */
1136 12, /* bitsize */
1137 false, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
1142 false, /* partial_inplace */
1143 0x7ff8, /* src_mask */
1144 0x7ff8, /* dst_mask */
1145 false), /* pcrel_offset */
1146
1147 /* LD32: GOT offset to the page address of GOT table.
1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
1150 2, /* rightshift */
1151 4, /* size */
1152 12, /* bitsize */
1153 false, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_unsigned, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
1158 false, /* partial_inplace */
1159 0x5ffc, /* src_mask */
1160 0x5ffc, /* dst_mask */
1161 false), /* pcrel_offset */
1162
1163 /* LD64: GOT offset to the page address of GOT table.
1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
1166 3, /* rightshift */
1167 4, /* size */
1168 12, /* bitsize */
1169 false, /* pc_relative */
1170 0, /* bitpos */
1171 complain_overflow_unsigned, /* complain_on_overflow */
1172 bfd_elf_generic_reloc, /* special_function */
1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
1174 false, /* partial_inplace */
1175 0x7ff8, /* src_mask */
1176 0x7ff8, /* dst_mask */
1177 false), /* pcrel_offset */
1178
1179 /* Get to the page for the GOT entry for the symbol
1180 (G(S) - P) using an ADRP instruction. */
1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
1182 12, /* rightshift */
1183 4, /* size */
1184 21, /* bitsize */
1185 true, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
1190 false, /* partial_inplace */
1191 0x1fffff, /* src_mask */
1192 0x1fffff, /* dst_mask */
1193 true), /* pcrel_offset */
1194
1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
1196 0, /* rightshift */
1197 4, /* size */
1198 21, /* bitsize */
1199 true, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
1204 false, /* partial_inplace */
1205 0x1fffff, /* src_mask */
1206 0x1fffff, /* dst_mask */
1207 true), /* pcrel_offset */
1208
1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1211 0, /* rightshift */
1212 4, /* size */
1213 12, /* bitsize */
1214 false, /* pc_relative */
1215 0, /* bitpos */
1216 complain_overflow_dont, /* complain_on_overflow */
1217 bfd_elf_generic_reloc, /* special_function */
1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1219 false, /* partial_inplace */
1220 0xfff, /* src_mask */
1221 0xfff, /* dst_mask */
1222 false), /* pcrel_offset */
1223
1224 /* Lower 16 bits of GOT offset to tls_index. */
1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1226 0, /* rightshift */
1227 4, /* size */
1228 16, /* bitsize */
1229 false, /* pc_relative */
1230 0, /* bitpos */
1231 complain_overflow_dont, /* complain_on_overflow */
1232 bfd_elf_generic_reloc, /* special_function */
1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1234 false, /* partial_inplace */
1235 0xffff, /* src_mask */
1236 0xffff, /* dst_mask */
1237 false), /* pcrel_offset */
1238
1239 /* Higher 16 bits of GOT offset to tls_index. */
1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1241 16, /* rightshift */
1242 4, /* size */
1243 16, /* bitsize */
1244 false, /* pc_relative */
1245 0, /* bitpos */
1246 complain_overflow_unsigned, /* complain_on_overflow */
1247 bfd_elf_generic_reloc, /* special_function */
1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1249 false, /* partial_inplace */
1250 0xffff, /* src_mask */
1251 0xffff, /* dst_mask */
1252 false), /* pcrel_offset */
1253
1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1255 12, /* rightshift */
1256 4, /* size */
1257 21, /* bitsize */
1258 false, /* pc_relative */
1259 0, /* bitpos */
1260 complain_overflow_dont, /* complain_on_overflow */
1261 bfd_elf_generic_reloc, /* special_function */
1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1263 false, /* partial_inplace */
1264 0x1fffff, /* src_mask */
1265 0x1fffff, /* dst_mask */
1266 false), /* pcrel_offset */
1267
1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1269 3, /* rightshift */
1270 4, /* size */
1271 12, /* bitsize */
1272 false, /* pc_relative */
1273 0, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1277 false, /* partial_inplace */
1278 0xff8, /* src_mask */
1279 0xff8, /* dst_mask */
1280 false), /* pcrel_offset */
1281
1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1283 2, /* rightshift */
1284 4, /* size */
1285 12, /* bitsize */
1286 false, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1291 false, /* partial_inplace */
1292 0xffc, /* src_mask */
1293 0xffc, /* dst_mask */
1294 false), /* pcrel_offset */
1295
1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1297 2, /* rightshift */
1298 4, /* size */
1299 19, /* bitsize */
1300 false, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1305 false, /* partial_inplace */
1306 0x1ffffc, /* src_mask */
1307 0x1ffffc, /* dst_mask */
1308 false), /* pcrel_offset */
1309
1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1311 0, /* rightshift */
1312 4, /* size */
1313 16, /* bitsize */
1314 false, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1319 false, /* partial_inplace */
1320 0xffff, /* src_mask */
1321 0xffff, /* dst_mask */
1322 false), /* pcrel_offset */
1323
1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1325 16, /* rightshift */
1326 4, /* size */
1327 16, /* bitsize */
1328 false, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_unsigned, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1333 false, /* partial_inplace */
1334 0xffff, /* src_mask */
1335 0xffff, /* dst_mask */
1336 false), /* pcrel_offset */
1337
1338 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1340 12, /* rightshift */
1341 4, /* size */
1342 12, /* bitsize */
1343 false, /* pc_relative */
1344 0, /* bitpos */
1345 complain_overflow_unsigned, /* complain_on_overflow */
1346 bfd_elf_generic_reloc, /* special_function */
1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1348 false, /* partial_inplace */
1349 0xfff, /* src_mask */
1350 0xfff, /* dst_mask */
1351 false), /* pcrel_offset */
1352
1353 /* Unsigned 12 bit byte offset to module TLS base address. */
1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1355 0, /* rightshift */
1356 4, /* size */
1357 12, /* bitsize */
1358 false, /* pc_relative */
1359 0, /* bitpos */
1360 complain_overflow_unsigned, /* complain_on_overflow */
1361 bfd_elf_generic_reloc, /* special_function */
1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1363 false, /* partial_inplace */
1364 0xfff, /* src_mask */
1365 0xfff, /* dst_mask */
1366 false), /* pcrel_offset */
1367
1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1370 0, /* rightshift */
1371 4, /* size */
1372 12, /* bitsize */
1373 false, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1378 false, /* partial_inplace */
1379 0xfff, /* src_mask */
1380 0xfff, /* dst_mask */
1381 false), /* pcrel_offset */
1382
1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1385 0, /* rightshift */
1386 4, /* size */
1387 12, /* bitsize */
1388 false, /* pc_relative */
1389 0, /* bitpos */
1390 complain_overflow_dont, /* complain_on_overflow */
1391 bfd_elf_generic_reloc, /* special_function */
1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1393 false, /* partial_inplace */
1394 0xfff, /* src_mask */
1395 0xfff, /* dst_mask */
1396 false), /* pcrel_offset */
1397
1398 /* Get to the page for the GOT entry for the symbol
1399 (G(S) - P) using an ADRP instruction. */
1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1401 12, /* rightshift */
1402 4, /* size */
1403 21, /* bitsize */
1404 true, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_signed, /* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1409 false, /* partial_inplace */
1410 0x1fffff, /* src_mask */
1411 0x1fffff, /* dst_mask */
1412 true), /* pcrel_offset */
1413
1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1415 0, /* rightshift */
1416 4, /* size */
1417 21, /* bitsize */
1418 true, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_signed, /* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1423 false, /* partial_inplace */
1424 0x1fffff, /* src_mask */
1425 0x1fffff, /* dst_mask */
1426 true), /* pcrel_offset */
1427
1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1430 1, /* rightshift */
1431 4, /* size */
1432 11, /* bitsize */
1433 false, /* pc_relative */
1434 10, /* bitpos */
1435 complain_overflow_unsigned, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1438 false, /* partial_inplace */
1439 0x1ffc00, /* src_mask */
1440 0x1ffc00, /* dst_mask */
1441 false), /* pcrel_offset */
1442
1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1445 1, /* rightshift */
1446 4, /* size */
1447 11, /* bitsize */
1448 false, /* pc_relative */
1449 10, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1453 false, /* partial_inplace */
1454 0x1ffc00, /* src_mask */
1455 0x1ffc00, /* dst_mask */
1456 false), /* pcrel_offset */
1457
1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1460 2, /* rightshift */
1461 4, /* size */
1462 10, /* bitsize */
1463 false, /* pc_relative */
1464 10, /* bitpos */
1465 complain_overflow_unsigned, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1468 false, /* partial_inplace */
1469 0x3ffc00, /* src_mask */
1470 0x3ffc00, /* dst_mask */
1471 false), /* pcrel_offset */
1472
1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1475 2, /* rightshift */
1476 4, /* size */
1477 10, /* bitsize */
1478 false, /* pc_relative */
1479 10, /* bitpos */
1480 complain_overflow_dont, /* complain_on_overflow */
1481 bfd_elf_generic_reloc, /* special_function */
1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1483 false, /* partial_inplace */
1484 0xffc00, /* src_mask */
1485 0xffc00, /* dst_mask */
1486 false), /* pcrel_offset */
1487
1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1490 3, /* rightshift */
1491 4, /* size */
1492 9, /* bitsize */
1493 false, /* pc_relative */
1494 10, /* bitpos */
1495 complain_overflow_unsigned, /* complain_on_overflow */
1496 bfd_elf_generic_reloc, /* special_function */
1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1498 false, /* partial_inplace */
1499 0x3ffc00, /* src_mask */
1500 0x3ffc00, /* dst_mask */
1501 false), /* pcrel_offset */
1502
1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1505 3, /* rightshift */
1506 4, /* size */
1507 9, /* bitsize */
1508 false, /* pc_relative */
1509 10, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1513 false, /* partial_inplace */
1514 0x7fc00, /* src_mask */
1515 0x7fc00, /* dst_mask */
1516 false), /* pcrel_offset */
1517
1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1520 0, /* rightshift */
1521 4, /* size */
1522 12, /* bitsize */
1523 false, /* pc_relative */
1524 10, /* bitpos */
1525 complain_overflow_unsigned, /* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1528 false, /* partial_inplace */
1529 0x3ffc00, /* src_mask */
1530 0x3ffc00, /* dst_mask */
1531 false), /* pcrel_offset */
1532
1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1535 0, /* rightshift */
1536 4, /* size */
1537 12, /* bitsize */
1538 false, /* pc_relative */
1539 10, /* bitpos */
1540 complain_overflow_dont, /* complain_on_overflow */
1541 bfd_elf_generic_reloc, /* special_function */
1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1543 false, /* partial_inplace */
1544 0x3ffc00, /* src_mask */
1545 0x3ffc00, /* dst_mask */
1546 false), /* pcrel_offset */
1547
1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1550 0, /* rightshift */
1551 4, /* size */
1552 16, /* bitsize */
1553 false, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_unsigned, /* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1558 false, /* partial_inplace */
1559 0xffff, /* src_mask */
1560 0xffff, /* dst_mask */
1561 false), /* pcrel_offset */
1562
1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1565 0, /* rightshift */
1566 4, /* size */
1567 16, /* bitsize */
1568 false, /* pc_relative */
1569 0, /* bitpos */
1570 complain_overflow_dont, /* complain_on_overflow */
1571 bfd_elf_generic_reloc, /* special_function */
1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1573 false, /* partial_inplace */
1574 0xffff, /* src_mask */
1575 0xffff, /* dst_mask */
1576 false), /* pcrel_offset */
1577
1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1580 16, /* rightshift */
1581 4, /* size */
1582 16, /* bitsize */
1583 false, /* pc_relative */
1584 0, /* bitpos */
1585 complain_overflow_unsigned, /* complain_on_overflow */
1586 bfd_elf_generic_reloc, /* special_function */
1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1588 false, /* partial_inplace */
1589 0xffff, /* src_mask */
1590 0xffff, /* dst_mask */
1591 false), /* pcrel_offset */
1592
1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1595 16, /* rightshift */
1596 4, /* size */
1597 16, /* bitsize */
1598 false, /* pc_relative */
1599 0, /* bitpos */
1600 complain_overflow_dont, /* complain_on_overflow */
1601 bfd_elf_generic_reloc, /* special_function */
1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1603 false, /* partial_inplace */
1604 0xffff, /* src_mask */
1605 0xffff, /* dst_mask */
1606 false), /* pcrel_offset */
1607
1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1610 32, /* rightshift */
1611 4, /* size */
1612 16, /* bitsize */
1613 false, /* pc_relative */
1614 0, /* bitpos */
1615 complain_overflow_unsigned, /* complain_on_overflow */
1616 bfd_elf_generic_reloc, /* special_function */
1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1618 false, /* partial_inplace */
1619 0xffff, /* src_mask */
1620 0xffff, /* dst_mask */
1621 false), /* pcrel_offset */
1622
1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1624 32, /* rightshift */
1625 4, /* size */
1626 16, /* bitsize */
1627 false, /* pc_relative */
1628 0, /* bitpos */
1629 complain_overflow_unsigned, /* complain_on_overflow */
1630 bfd_elf_generic_reloc, /* special_function */
1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1632 false, /* partial_inplace */
1633 0xffff, /* src_mask */
1634 0xffff, /* dst_mask */
1635 false), /* pcrel_offset */
1636
1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1638 16, /* rightshift */
1639 4, /* size */
1640 16, /* bitsize */
1641 false, /* pc_relative */
1642 0, /* bitpos */
1643 complain_overflow_dont, /* complain_on_overflow */
1644 bfd_elf_generic_reloc, /* special_function */
1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1646 false, /* partial_inplace */
1647 0xffff, /* src_mask */
1648 0xffff, /* dst_mask */
1649 false), /* pcrel_offset */
1650
1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1652 16, /* rightshift */
1653 4, /* size */
1654 16, /* bitsize */
1655 false, /* pc_relative */
1656 0, /* bitpos */
1657 complain_overflow_dont, /* complain_on_overflow */
1658 bfd_elf_generic_reloc, /* special_function */
1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1660 false, /* partial_inplace */
1661 0xffff, /* src_mask */
1662 0xffff, /* dst_mask */
1663 false), /* pcrel_offset */
1664
1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1666 0, /* rightshift */
1667 4, /* size */
1668 16, /* bitsize */
1669 false, /* pc_relative */
1670 0, /* bitpos */
1671 complain_overflow_dont, /* complain_on_overflow */
1672 bfd_elf_generic_reloc, /* special_function */
1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1674 false, /* partial_inplace */
1675 0xffff, /* src_mask */
1676 0xffff, /* dst_mask */
1677 false), /* pcrel_offset */
1678
1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1680 0, /* rightshift */
1681 4, /* size */
1682 16, /* bitsize */
1683 false, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_dont, /* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1688 false, /* partial_inplace */
1689 0xffff, /* src_mask */
1690 0xffff, /* dst_mask */
1691 false), /* pcrel_offset */
1692
1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1694 12, /* rightshift */
1695 4, /* size */
1696 12, /* bitsize */
1697 false, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_unsigned, /* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1702 false, /* partial_inplace */
1703 0xfff, /* src_mask */
1704 0xfff, /* dst_mask */
1705 false), /* pcrel_offset */
1706
1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1708 0, /* rightshift */
1709 4, /* size */
1710 12, /* bitsize */
1711 false, /* pc_relative */
1712 0, /* bitpos */
1713 complain_overflow_unsigned, /* complain_on_overflow */
1714 bfd_elf_generic_reloc, /* special_function */
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1716 false, /* partial_inplace */
1717 0xfff, /* src_mask */
1718 0xfff, /* dst_mask */
1719 false), /* pcrel_offset */
1720
1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1722 0, /* rightshift */
1723 4, /* size */
1724 12, /* bitsize */
1725 false, /* pc_relative */
1726 0, /* bitpos */
1727 complain_overflow_dont, /* complain_on_overflow */
1728 bfd_elf_generic_reloc, /* special_function */
1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1730 false, /* partial_inplace */
1731 0xfff, /* src_mask */
1732 0xfff, /* dst_mask */
1733 false), /* pcrel_offset */
1734
1735 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
1737 1, /* rightshift */
1738 4, /* size */
1739 11, /* bitsize */
1740 false, /* pc_relative */
1741 10, /* bitpos */
1742 complain_overflow_unsigned, /* complain_on_overflow */
1743 bfd_elf_generic_reloc, /* special_function */
1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
1745 false, /* partial_inplace */
1746 0x1ffc00, /* src_mask */
1747 0x1ffc00, /* dst_mask */
1748 false), /* pcrel_offset */
1749
1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
1752 1, /* rightshift */
1753 4, /* size */
1754 11, /* bitsize */
1755 false, /* pc_relative */
1756 10, /* bitpos */
1757 complain_overflow_dont, /* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
1760 false, /* partial_inplace */
1761 0x1ffc00, /* src_mask */
1762 0x1ffc00, /* dst_mask */
1763 false), /* pcrel_offset */
1764
1765 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
1767 2, /* rightshift */
1768 4, /* size */
1769 10, /* bitsize */
1770 false, /* pc_relative */
1771 10, /* bitpos */
1772 complain_overflow_unsigned, /* complain_on_overflow */
1773 bfd_elf_generic_reloc, /* special_function */
1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
1775 false, /* partial_inplace */
1776 0xffc00, /* src_mask */
1777 0xffc00, /* dst_mask */
1778 false), /* pcrel_offset */
1779
1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
1782 2, /* rightshift */
1783 4, /* size */
1784 10, /* bitsize */
1785 false, /* pc_relative */
1786 10, /* bitpos */
1787 complain_overflow_dont, /* complain_on_overflow */
1788 bfd_elf_generic_reloc, /* special_function */
1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
1790 false, /* partial_inplace */
1791 0xffc00, /* src_mask */
1792 0xffc00, /* dst_mask */
1793 false), /* pcrel_offset */
1794
1795 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
1797 3, /* rightshift */
1798 4, /* size */
1799 9, /* bitsize */
1800 false, /* pc_relative */
1801 10, /* bitpos */
1802 complain_overflow_unsigned, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
1805 false, /* partial_inplace */
1806 0x7fc00, /* src_mask */
1807 0x7fc00, /* dst_mask */
1808 false), /* pcrel_offset */
1809
1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
1812 3, /* rightshift */
1813 4, /* size */
1814 9, /* bitsize */
1815 false, /* pc_relative */
1816 10, /* bitpos */
1817 complain_overflow_dont, /* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
1820 false, /* partial_inplace */
1821 0x7fc00, /* src_mask */
1822 0x7fc00, /* dst_mask */
1823 false), /* pcrel_offset */
1824
1825 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
1827 0, /* rightshift */
1828 4, /* size */
1829 12, /* bitsize */
1830 false, /* pc_relative */
1831 10, /* bitpos */
1832 complain_overflow_unsigned, /* complain_on_overflow */
1833 bfd_elf_generic_reloc, /* special_function */
1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
1835 false, /* partial_inplace */
1836 0x3ffc00, /* src_mask */
1837 0x3ffc00, /* dst_mask */
1838 false), /* pcrel_offset */
1839
1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
1842 0, /* rightshift */
1843 4, /* size */
1844 12, /* bitsize */
1845 false, /* pc_relative */
1846 10, /* bitpos */
1847 complain_overflow_dont, /* complain_on_overflow */
1848 bfd_elf_generic_reloc, /* special_function */
1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
1850 false, /* partial_inplace */
1851 0x3ffc00, /* src_mask */
1852 0x3ffc00, /* dst_mask */
1853 false), /* pcrel_offset */
1854
1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1856 2, /* rightshift */
1857 4, /* size */
1858 19, /* bitsize */
1859 true, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_dont, /* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1864 false, /* partial_inplace */
1865 0x0ffffe0, /* src_mask */
1866 0x0ffffe0, /* dst_mask */
1867 true), /* pcrel_offset */
1868
1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1870 0, /* rightshift */
1871 4, /* size */
1872 21, /* bitsize */
1873 true, /* pc_relative */
1874 0, /* bitpos */
1875 complain_overflow_dont, /* complain_on_overflow */
1876 bfd_elf_generic_reloc, /* special_function */
1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1878 false, /* partial_inplace */
1879 0x1fffff, /* src_mask */
1880 0x1fffff, /* dst_mask */
1881 true), /* pcrel_offset */
1882
1883 /* Get to the page for the GOT entry for the symbol
1884 (G(S) - P) using an ADRP instruction. */
1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1886 12, /* rightshift */
1887 4, /* size */
1888 21, /* bitsize */
1889 true, /* pc_relative */
1890 0, /* bitpos */
1891 complain_overflow_dont, /* complain_on_overflow */
1892 bfd_elf_generic_reloc, /* special_function */
1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1894 false, /* partial_inplace */
1895 0x1fffff, /* src_mask */
1896 0x1fffff, /* dst_mask */
1897 true), /* pcrel_offset */
1898
1899 /* LD64: GOT offset G(S) & 0xff8. */
1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
1901 3, /* rightshift */
1902 4, /* size */
1903 12, /* bitsize */
1904 false, /* pc_relative */
1905 0, /* bitpos */
1906 complain_overflow_dont, /* complain_on_overflow */
1907 bfd_elf_generic_reloc, /* special_function */
1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
1909 false, /* partial_inplace */
1910 0xff8, /* src_mask */
1911 0xff8, /* dst_mask */
1912 false), /* pcrel_offset */
1913
1914 /* LD32: GOT offset G(S) & 0xffc. */
1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1916 2, /* rightshift */
1917 4, /* size */
1918 12, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont, /* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1924 false, /* partial_inplace */
1925 0xffc, /* src_mask */
1926 0xffc, /* dst_mask */
1927 false), /* pcrel_offset */
1928
1929 /* ADD: GOT offset G(S) & 0xfff. */
1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
1931 0, /* rightshift */
1932 4, /* size */
1933 12, /* bitsize */
1934 false, /* pc_relative */
1935 0, /* bitpos */
1936 complain_overflow_dont,/* complain_on_overflow */
1937 bfd_elf_generic_reloc, /* special_function */
1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
1939 false, /* partial_inplace */
1940 0xfff, /* src_mask */
1941 0xfff, /* dst_mask */
1942 false), /* pcrel_offset */
1943
1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1945 16, /* rightshift */
1946 4, /* size */
1947 12, /* bitsize */
1948 false, /* pc_relative */
1949 0, /* bitpos */
1950 complain_overflow_unsigned, /* complain_on_overflow */
1951 bfd_elf_generic_reloc, /* special_function */
1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1953 false, /* partial_inplace */
1954 0xffff, /* src_mask */
1955 0xffff, /* dst_mask */
1956 false), /* pcrel_offset */
1957
1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1959 0, /* rightshift */
1960 4, /* size */
1961 12, /* bitsize */
1962 false, /* pc_relative */
1963 0, /* bitpos */
1964 complain_overflow_dont, /* complain_on_overflow */
1965 bfd_elf_generic_reloc, /* special_function */
1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1967 false, /* partial_inplace */
1968 0xffff, /* src_mask */
1969 0xffff, /* dst_mask */
1970 false), /* pcrel_offset */
1971
1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1973 0, /* rightshift */
1974 4, /* size */
1975 12, /* bitsize */
1976 false, /* pc_relative */
1977 0, /* bitpos */
1978 complain_overflow_dont, /* complain_on_overflow */
1979 bfd_elf_generic_reloc, /* special_function */
1980 AARCH64_R_STR (TLSDESC_LDR), /* name */
1981 false, /* partial_inplace */
1982 0x0, /* src_mask */
1983 0x0, /* dst_mask */
1984 false), /* pcrel_offset */
1985
1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1987 0, /* rightshift */
1988 4, /* size */
1989 12, /* bitsize */
1990 false, /* pc_relative */
1991 0, /* bitpos */
1992 complain_overflow_dont, /* complain_on_overflow */
1993 bfd_elf_generic_reloc, /* special_function */
1994 AARCH64_R_STR (TLSDESC_ADD), /* name */
1995 false, /* partial_inplace */
1996 0x0, /* src_mask */
1997 0x0, /* dst_mask */
1998 false), /* pcrel_offset */
1999
2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
2001 0, /* rightshift */
2002 4, /* size */
2003 0, /* bitsize */
2004 false, /* pc_relative */
2005 0, /* bitpos */
2006 complain_overflow_dont, /* complain_on_overflow */
2007 bfd_elf_generic_reloc, /* special_function */
2008 AARCH64_R_STR (TLSDESC_CALL), /* name */
2009 false, /* partial_inplace */
2010 0x0, /* src_mask */
2011 0x0, /* dst_mask */
2012 false), /* pcrel_offset */
2013
2014 HOWTO (AARCH64_R (COPY), /* type */
2015 0, /* rightshift */
2016 4, /* size */
2017 64, /* bitsize */
2018 false, /* pc_relative */
2019 0, /* bitpos */
2020 complain_overflow_bitfield, /* complain_on_overflow */
2021 bfd_elf_generic_reloc, /* special_function */
2022 AARCH64_R_STR (COPY), /* name */
2023 true, /* partial_inplace */
2024 0xffffffff, /* src_mask */
2025 0xffffffff, /* dst_mask */
2026 false), /* pcrel_offset */
2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */
2029 0, /* rightshift */
2030 4, /* size */
2031 64, /* bitsize */
2032 false, /* pc_relative */
2033 0, /* bitpos */
2034 complain_overflow_bitfield, /* complain_on_overflow */
2035 bfd_elf_generic_reloc, /* special_function */
2036 AARCH64_R_STR (GLOB_DAT), /* name */
2037 true, /* partial_inplace */
2038 0xffffffff, /* src_mask */
2039 0xffffffff, /* dst_mask */
2040 false), /* pcrel_offset */
2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
2043 0, /* rightshift */
2044 4, /* size */
2045 64, /* bitsize */
2046 false, /* pc_relative */
2047 0, /* bitpos */
2048 complain_overflow_bitfield, /* complain_on_overflow */
2049 bfd_elf_generic_reloc, /* special_function */
2050 AARCH64_R_STR (JUMP_SLOT), /* name */
2051 true, /* partial_inplace */
2052 0xffffffff, /* src_mask */
2053 0xffffffff, /* dst_mask */
2054 false), /* pcrel_offset */
2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */
2057 0, /* rightshift */
2058 4, /* size */
2059 64, /* bitsize */
2060 false, /* pc_relative */
2061 0, /* bitpos */
2062 complain_overflow_bitfield, /* complain_on_overflow */
2063 bfd_elf_generic_reloc, /* special_function */
2064 AARCH64_R_STR (RELATIVE), /* name */
2065 true, /* partial_inplace */
2066 ALL_ONES, /* src_mask */
2067 ALL_ONES, /* dst_mask */
2068 false), /* pcrel_offset */
2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
2071 0, /* rightshift */
2072 4, /* size */
2073 64, /* bitsize */
2074 false, /* pc_relative */
2075 0, /* bitpos */
2076 complain_overflow_dont, /* complain_on_overflow */
2077 bfd_elf_generic_reloc, /* special_function */
2078 #if ARCH_SIZE == 64
2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */
2080 #else
2081 AARCH64_R_STR (TLS_DTPMOD), /* name */
2082 #endif
2083 false, /* partial_inplace */
2084 0, /* src_mask */
2085 ALL_ONES, /* dst_mask */
2086 false), /* pcrel_offset */
2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
2089 0, /* rightshift */
2090 4, /* size */
2091 64, /* bitsize */
2092 false, /* pc_relative */
2093 0, /* bitpos */
2094 complain_overflow_dont, /* complain_on_overflow */
2095 bfd_elf_generic_reloc, /* special_function */
2096 #if ARCH_SIZE == 64
2097 AARCH64_R_STR (TLS_DTPREL64), /* name */
2098 #else
2099 AARCH64_R_STR (TLS_DTPREL), /* name */
2100 #endif
2101 false, /* partial_inplace */
2102 0, /* src_mask */
2103 ALL_ONES, /* dst_mask */
2104 false), /* pcrel_offset */
2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */
2107 0, /* rightshift */
2108 4, /* size */
2109 64, /* bitsize */
2110 false, /* pc_relative */
2111 0, /* bitpos */
2112 complain_overflow_dont, /* complain_on_overflow */
2113 bfd_elf_generic_reloc, /* special_function */
2114 #if ARCH_SIZE == 64
2115 AARCH64_R_STR (TLS_TPREL64), /* name */
2116 #else
2117 AARCH64_R_STR (TLS_TPREL), /* name */
2118 #endif
2119 false, /* partial_inplace */
2120 0, /* src_mask */
2121 ALL_ONES, /* dst_mask */
2122 false), /* pcrel_offset */
2123
2124 HOWTO (AARCH64_R (TLSDESC), /* type */
2125 0, /* rightshift */
2126 4, /* size */
2127 64, /* bitsize */
2128 false, /* pc_relative */
2129 0, /* bitpos */
2130 complain_overflow_dont, /* complain_on_overflow */
2131 bfd_elf_generic_reloc, /* special_function */
2132 AARCH64_R_STR (TLSDESC), /* name */
2133 false, /* partial_inplace */
2134 0, /* src_mask */
2135 ALL_ONES, /* dst_mask */
2136 false), /* pcrel_offset */
2137
2138 HOWTO (AARCH64_R (IRELATIVE), /* type */
2139 0, /* rightshift */
2140 4, /* size */
2141 64, /* bitsize */
2142 false, /* pc_relative */
2143 0, /* bitpos */
2144 complain_overflow_bitfield, /* complain_on_overflow */
2145 bfd_elf_generic_reloc, /* special_function */
2146 AARCH64_R_STR (IRELATIVE), /* name */
2147 false, /* partial_inplace */
2148 0, /* src_mask */
2149 ALL_ONES, /* dst_mask */
2150 false), /* pcrel_offset */
2151
2152 EMPTY_HOWTO (0),
2153 };
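/* Reading one entry above as a worked example (this only restates the
   HOWTO fields, it adds no behaviour): for TLSLE_LDST64_TPREL_LO12 the
   computed offset is shifted right by 3 (the LD/ST64 scaling), the low
   9 bits are kept (bitsize), and they are written starting at bit 10 of
   the instruction; the 0x7fc00 dst_mask therefore covers the low nine
   bits of the scaled immediate field of a 64-bit LDR/STR.  */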
2154
2155 static reloc_howto_type elfNN_aarch64_howto_none =
2156 HOWTO (R_AARCH64_NONE, /* type */
2157 0, /* rightshift */
2158 0, /* size */
2159 0, /* bitsize */
2160 false, /* pc_relative */
2161 0, /* bitpos */
2162 complain_overflow_dont,/* complain_on_overflow */
2163 bfd_elf_generic_reloc, /* special_function */
2164 "R_AARCH64_NONE", /* name */
2165 false, /* partial_inplace */
2166 0, /* src_mask */
2167 0, /* dst_mask */
2168 false); /* pcrel_offset */
2169
2170 /* Given HOWTO, return the bfd internal relocation enumerator. */
2171
2172 static bfd_reloc_code_real_type
2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
2174 {
2175 const int size
2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
2177 const ptrdiff_t offset
2178 = howto - elfNN_aarch64_howto_table;
2179
2180 if (offset > 0 && offset < size - 1)
2181 return BFD_RELOC_AARCH64_RELOC_START + offset;
2182
2183 if (howto == &elfNN_aarch64_howto_none)
2184 return BFD_RELOC_AARCH64_NONE;
2185
2186 return BFD_RELOC_AARCH64_RELOC_START;
2187 }
2188
2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */
2190
2191 static bfd_reloc_code_real_type
2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
2193 {
2194 static bool initialized_p = false;
2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */
2196 static unsigned int offsets[R_AARCH64_end];
2197
2198 if (!initialized_p)
2199 {
2200 unsigned int i;
2201
2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2203 if (elfNN_aarch64_howto_table[i].type != 0)
2204 offsets[elfNN_aarch64_howto_table[i].type] = i;
2205
2206 initialized_p = true;
2207 }
2208
2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
2210 return BFD_RELOC_AARCH64_NONE;
2211
2212 /* PR 17512: file: b371e70a. */
2213 if (r_type >= R_AARCH64_end)
2214 {
2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2216 abfd, r_type);
2217 bfd_set_error (bfd_error_bad_value);
2218 return BFD_RELOC_AARCH64_NONE;
2219 }
2220
2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
2222 }
2223
2224 struct elf_aarch64_reloc_map
2225 {
2226 bfd_reloc_code_real_type from;
2227 bfd_reloc_code_real_type to;
2228 };
2229
2230 /* Map bfd generic reloc to AArch64-specific reloc. */
2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
2232 {
2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
2234
2235 /* Basic data relocations. */
2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
2243 };
2244
2245 /* Given the bfd internal relocation enumerator in CODE, return the
2246 corresponding howto entry. */
2247
2248 static reloc_howto_type *
2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
2250 {
2251 unsigned int i;
2252
2253 /* Convert bfd generic reloc to AArch64-specific reloc. */
2254 if (code < BFD_RELOC_AARCH64_RELOC_START
2255 || code > BFD_RELOC_AARCH64_RELOC_END)
2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
2257 if (elf_aarch64_reloc_map[i].from == code)
2258 {
2259 code = elf_aarch64_reloc_map[i].to;
2260 break;
2261 }
2262
2263 if (code > BFD_RELOC_AARCH64_RELOC_START
2264 && code < BFD_RELOC_AARCH64_RELOC_END)
2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
2267
2268 if (code == BFD_RELOC_AARCH64_NONE)
2269 return &elfNN_aarch64_howto_none;
2270
2271 return NULL;
2272 }
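/* A small example of the lookup chain above (the reloc is chosen
   arbitrarily): BFD_RELOC_32_PCREL is first translated through
   elf_aarch64_reloc_map to BFD_RELOC_AARCH64_32_PCREL, and the howto is
   then found by indexing elfNN_aarch64_howto_table with
   code - BFD_RELOC_AARCH64_RELOC_START.  */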
2273
2274 static reloc_howto_type *
2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
2276 {
2277 bfd_reloc_code_real_type val;
2278 reloc_howto_type *howto;
2279
2280 #if ARCH_SIZE == 32
2281 if (r_type > 256)
2282 {
2283 bfd_set_error (bfd_error_bad_value);
2284 return NULL;
2285 }
2286 #endif
2287
2288 if (r_type == R_AARCH64_NONE)
2289 return &elfNN_aarch64_howto_none;
2290
2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
2293
2294 if (howto != NULL)
2295 return howto;
2296
2297 bfd_set_error (bfd_error_bad_value);
2298 return NULL;
2299 }
2300
2301 static bool
2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
2303 Elf_Internal_Rela *elf_reloc)
2304 {
2305 unsigned int r_type;
2306
2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
2309
2310 if (bfd_reloc->howto == NULL)
2311 {
2312 /* xgettext:c-format */
2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
2314 return false;
2315 }
2316 return true;
2317 }
2318
2319 static reloc_howto_type *
2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2321 bfd_reloc_code_real_type code)
2322 {
2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
2324
2325 if (howto != NULL)
2326 return howto;
2327
2328 bfd_set_error (bfd_error_bad_value);
2329 return NULL;
2330 }
2331
2332 static reloc_howto_type *
2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2334 const char *r_name)
2335 {
2336 unsigned int i;
2337
2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2339 if (elfNN_aarch64_howto_table[i].name != NULL
2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2341 return &elfNN_aarch64_howto_table[i];
2342
2343 return NULL;
2344 }
2345
2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2349 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2350
2351 /* The linker script knows the section names for placement.
2352 The entry_names are used to do simple name mangling on the stubs.
2353 Given a function name, and its type, the stub can be found. The
2354 name can be changed. The only requirement is that the %s be present. */
2355 #define STUB_ENTRY_NAME "__%s_veneer"
2356
2357 /* Stub name for a BTI landing stub. */
2358 #define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"
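/* As an illustration only (the function name is made up): for a call to
   `foo' that needs a veneer, these templates produce "__foo_veneer" for
   the ordinary stub and "__foo_bti_veneer" for the BTI landing stub:

     char name[64];
     snprintf (name, sizeof name, BTI_STUB_ENTRY_NAME, "foo");
     name is now "__foo_bti_veneer".  */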
2359
2360 /* The name of the dynamic interpreter. This is put in the .interp
2361 section. */
2362 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2363
2364 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2365 (((1 << 25) - 1) << 2)
2366 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2367 (-((1 << 25) << 2))
2368
2369 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2370 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
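/* Worked values for the limits above: B and BL encode a signed 26-bit
   word offset, so AARCH64_MAX_FWD_BRANCH_OFFSET is ((1 << 25) - 1) << 2
   = 0x7fffffc (just under +128 MiB) and the backward limit is
   -(1 << 27) bytes (-128 MiB).  ADRP encodes a signed 21-bit page
   offset, giving a reach of roughly +/- 4 GiB in 4 KiB pages.  */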
2371
2372 static int
2373 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2374 {
2375 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2376 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2377 }
2378
2379 static int
2380 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2381 {
2382 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2383 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2384 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2385 }
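/* Minimal usage sketch of the two range checks above; the addresses are
   invented for illustration:

     if (!aarch64_valid_branch_p (0x10000000, 0x400000))
       ...   the target is ~252 MiB away, beyond B/BL range, so a
             stub is needed.
     if (aarch64_valid_for_adrp_p (0x10000000, 0x400000))
       ...   yet the same distance is comfortably within ADRP range,
             so an adrp/add stub suffices.  */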
2386
2387 static const uint32_t aarch64_adrp_branch_stub [] =
2388 {
2389 0x90000010, /* adrp ip0, X */
2390 /* R_AARCH64_ADR_HI21_PCREL(X) */
2391 0x91000210, /* add ip0, ip0, :lo12:X */
2392 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2393 0xd61f0200, /* br ip0 */
2394 };
2395
2396 static const uint32_t aarch64_long_branch_stub[] =
2397 {
2398 #if ARCH_SIZE == 64
2399 0x58000090, /* ldr ip0, 1f */
2400 #else
2401 0x18000090, /* ldr wip0, 1f */
2402 #endif
2403 0x10000011, /* adr ip1, #0 */
2404 0x8b110210, /* add ip0, ip0, ip1 */
2405 0xd61f0200, /* br ip0 */
2406 0x00000000, /* 1: .xword or .word
2407 R_AARCH64_PRELNN(X) + 12
2408 */
2409 0x00000000,
2410 };
2411
2412 static const uint32_t aarch64_bti_direct_branch_stub[] =
2413 {
2414 0xd503245f, /* bti c */
2415 0x14000000, /* b <label> */
2416 };
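/* Informal summary of how this stub is used (see the BTI handling later
   in this file): the far-branch veneers above end in an indirect
   "br ip0", so when BTI is enforced the instruction they land on must be
   a valid landing pad.  If the real destination does not begin with one,
   the veneer is pointed at this two-instruction stub instead, which
   provides the "bti c" landing pad and then branches directly to the
   destination.  */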
2417
2418 static const uint32_t aarch64_erratum_835769_stub[] =
2419 {
2420 0x00000000, /* Placeholder for multiply accumulate. */
2421 0x14000000, /* b <label> */
2422 };
2423
2424 static const uint32_t aarch64_erratum_843419_stub[] =
2425 {
2426 0x00000000, /* Placeholder for LDR instruction. */
2427 0x14000000, /* b <label> */
2428 };
2429
2430 /* Section name for stubs is the associated section name plus this
2431 string. */
2432 #define STUB_SUFFIX ".stub"
2433
2434 enum elf_aarch64_stub_type
2435 {
2436 aarch64_stub_none,
2437 aarch64_stub_adrp_branch,
2438 aarch64_stub_long_branch,
2439 aarch64_stub_bti_direct_branch,
2440 aarch64_stub_erratum_835769_veneer,
2441 aarch64_stub_erratum_843419_veneer,
2442 };
2443
2444 struct elf_aarch64_stub_hash_entry
2445 {
2446 /* Base hash table entry structure. */
2447 struct bfd_hash_entry root;
2448
2449 /* The stub section. */
2450 asection *stub_sec;
2451
2452 /* Offset within stub_sec of the beginning of this stub. */
2453 bfd_vma stub_offset;
2454
2455 /* Given the symbol's value and its section we can determine its final
2456 value when building the stubs (so the stub knows where to jump). */
2457 bfd_vma target_value;
2458 asection *target_section;
2459
2460 enum elf_aarch64_stub_type stub_type;
2461
2462 /* The symbol table entry, if any, that this was derived from. */
2463 struct elf_aarch64_link_hash_entry *h;
2464
2465 /* Destination symbol type */
2466 unsigned char st_type;
2467
2468 /* The target is also a stub. */
2469 bool double_stub;
2470
2471 /* Where this stub is being called from, or, in the case of combined
2472 stub sections, the first input section in the group. */
2473 asection *id_sec;
2474
2475 /* The name for the local symbol at the start of this stub. The
2476 stub name in the hash table has to be unique; this does not, so
2477 it can be friendlier. */
2478 char *output_name;
2479
2480 /* The instruction which caused this stub to be generated (only valid for
2481 erratum 835769 workaround stubs at present). */
2482 uint32_t veneered_insn;
2483
2484 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2485 bfd_vma adrp_offset;
2486 };
2487
2488 /* Used to build a map of a section. This is required for mixed-endian
2489 code/data. */
2490
2491 typedef struct elf_elf_section_map
2492 {
2493 bfd_vma vma;
2494 char type;
2495 }
2496 elf_aarch64_section_map;
2497
2498
2499 typedef struct _aarch64_elf_section_data
2500 {
2501 struct bfd_elf_section_data elf;
2502 unsigned int mapcount;
2503 unsigned int mapsize;
2504 elf_aarch64_section_map *map;
2505 }
2506 _aarch64_elf_section_data;
2507
2508 #define elf_aarch64_section_data(sec) \
2509 ((_aarch64_elf_section_data *) elf_section_data (sec))
2510
2511 /* The size of the thread control block which is defined to be two pointers. */
2512 #define TCB_SIZE (ARCH_SIZE/8)*2
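/* For example, with ARCH_SIZE == 64 this evaluates to (64/8)*2 = 16
   bytes and with ARCH_SIZE == 32 to 8 bytes.  AArch64 uses TLS layout
   variant I, where the thread pointer addresses this control block and
   the static TLS blocks follow it, which is why TPREL offsets are
   biased by TCB_SIZE when the backend computes them.  */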
2513
2514 struct elf_aarch64_local_symbol
2515 {
2516 unsigned int got_type;
2517 bfd_signed_vma got_refcount;
2518 bfd_vma got_offset;
2519
2520 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2521 offset is from the end of the jump table and reserved entries
2522 within the PLTGOT.
2523
2524 The magic value (bfd_vma) -1 indicates that an offset has not been
2525 allocated. */
2526 bfd_vma tlsdesc_got_jump_table_offset;
2527 };
2528
2529 struct elf_aarch64_obj_tdata
2530 {
2531 struct elf_obj_tdata root;
2532
2533 /* local symbol descriptors */
2534 struct elf_aarch64_local_symbol *locals;
2535
2536 /* Zero to warn when linking objects with incompatible enum sizes. */
2537 int no_enum_size_warning;
2538
2539 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2540 int no_wchar_size_warning;
2541
2542 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
2543 uint32_t gnu_and_prop;
2544
2545 /* Zero to warn when linking objects with incompatible
2546 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
2547 int no_bti_warn;
2548
2549 /* PLT type based on security. */
2550 aarch64_plt_type plt_type;
2551 };
2552
2553 #define elf_aarch64_tdata(bfd) \
2554 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2555
2556 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2557
2558 #define is_aarch64_elf(bfd) \
2559 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2560 && elf_tdata (bfd) != NULL \
2561 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2562
2563 static bool
2564 elfNN_aarch64_mkobject (bfd *abfd)
2565 {
2566 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2567 AARCH64_ELF_DATA);
2568 }
2569
2570 #define elf_aarch64_hash_entry(ent) \
2571 ((struct elf_aarch64_link_hash_entry *)(ent))
2572
2573 #define GOT_UNKNOWN 0
2574 #define GOT_NORMAL 1
2575 #define GOT_TLS_GD 2
2576 #define GOT_TLS_IE 4
2577 #define GOT_TLSDESC_GD 8
2578
2579 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
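/* Illustrative combinations (the values follow directly from the masks
   above): a symbol referenced through both a TLS descriptor sequence and
   an initial-exec sequence records
   got_type == (GOT_TLSDESC_GD | GOT_TLS_IE) == 12;
   GOT_TLS_GD_ANY_P (12) is true, while GOT_TLS_GD_ANY_P (GOT_NORMAL)
   is false.  */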
2580
2581 /* AArch64 ELF linker hash entry. */
2582 struct elf_aarch64_link_hash_entry
2583 {
2584 struct elf_link_hash_entry root;
2585
2586 /* Since PLT entries have variable size, we need to record the
2587 index into .got.plt instead of recomputing it from the PLT
2588 offset. */
2589 bfd_signed_vma plt_got_offset;
2590
2591 /* Bit mask representing the type of GOT entry(s), if any, required by
2592 this symbol. */
2593 unsigned int got_type;
2594
2595 /* TRUE if symbol is defined as a protected symbol. */
2596 unsigned int def_protected : 1;
2597
2598 /* A pointer to the most recently used stub hash entry against this
2599 symbol. */
2600 struct elf_aarch64_stub_hash_entry *stub_cache;
2601
2602 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2603 is from the end of the jump table and reserved entries within the PLTGOT.
2604
2605 The magic value (bfd_vma) -1 indicates that an offset has not
2606 been allocated. */
2607 bfd_vma tlsdesc_got_jump_table_offset;
2608 };
2609
2610 static unsigned int
2611 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2612 bfd *abfd,
2613 unsigned long r_symndx)
2614 {
2615 if (h)
2616 return elf_aarch64_hash_entry (h)->got_type;
2617
2618 if (! elf_aarch64_locals (abfd))
2619 return GOT_UNKNOWN;
2620
2621 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2622 }
2623
2624 /* Get the AArch64 elf linker hash table from a link_info structure. */
2625 #define elf_aarch64_hash_table(info) \
2626 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2627
2628 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2629 ((struct elf_aarch64_stub_hash_entry *) \
2630 bfd_hash_lookup ((table), (string), (create), (copy)))
2631
2632 /* AArch64 ELF linker hash table. */
2633 struct elf_aarch64_link_hash_table
2634 {
2635 /* The main hash table. */
2636 struct elf_link_hash_table root;
2637
2638 /* Nonzero to force PIC branch veneers. */
2639 int pic_veneer;
2640
2641 /* Fix erratum 835769. */
2642 int fix_erratum_835769;
2643
2644 /* Fix erratum 843419. */
2645 erratum_84319_opts fix_erratum_843419;
2646
2647 /* Don't apply link-time values for dynamic relocations. */
2648 int no_apply_dynamic_relocs;
2649
2650 /* The number of bytes in the initial entry in the PLT. */
2651 bfd_size_type plt_header_size;
2652
2653 /* The bytes of the initial PLT entry. */
2654 const bfd_byte *plt0_entry;
2655
2656 /* The number of bytes in the subsequent PLT entries. */
2657 bfd_size_type plt_entry_size;
2658
2659 /* The bytes of the subsequent PLT entry. */
2660 const bfd_byte *plt_entry;
2661
2662 /* For convenience in allocate_dynrelocs. */
2663 bfd *obfd;
2664
2665 /* The amount of space used by the reserved portion of the sgotplt
2666 section, plus whatever space is used by the jump slots. */
2667 bfd_vma sgotplt_jump_table_size;
2668
2669 /* The stub hash table. */
2670 struct bfd_hash_table stub_hash_table;
2671
2672 /* Linker stub bfd. */
2673 bfd *stub_bfd;
2674
2675 /* Linker call-backs. */
2676 asection *(*add_stub_section) (const char *, asection *);
2677 void (*layout_sections_again) (void);
2678
2679 /* Array to keep track of which stub sections have been created, and
2680 information on stub grouping. */
2681 struct map_stub
2682 {
2683 /* This is the section to which stubs in the group will be
2684 attached. */
2685 asection *link_sec;
2686 /* The stub section. */
2687 asection *stub_sec;
2688 } *stub_group;
2689
2690 /* Assorted information used by elfNN_aarch64_size_stubs. */
2691 unsigned int bfd_count;
2692 unsigned int top_index;
2693 asection **input_list;
2694
2695 /* True when two stubs are added where one targets the other; this happens
2696 when BTI stubs are inserted, and then the stub layout must not change
2697 during elfNN_aarch64_build_stubs. */
2698 bool has_double_stub;
2699
2700 /* JUMP_SLOT relocs for variant PCS symbols may be present. */
2701 int variant_pcs;
2702
2703 /* The number of bytes in the PLT entry for the TLS descriptor. */
2704 bfd_size_type tlsdesc_plt_entry_size;
2705
2706 /* Used by local STT_GNU_IFUNC symbols. */
2707 htab_t loc_hash_table;
2708 void * loc_hash_memory;
2709 };
2710
2711 /* Create an entry in an AArch64 ELF linker hash table. */
2712
2713 static struct bfd_hash_entry *
2714 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2715 struct bfd_hash_table *table,
2716 const char *string)
2717 {
2718 struct elf_aarch64_link_hash_entry *ret =
2719 (struct elf_aarch64_link_hash_entry *) entry;
2720
2721 /* Allocate the structure if it has not already been allocated by a
2722 subclass. */
2723 if (ret == NULL)
2724 ret = bfd_hash_allocate (table,
2725 sizeof (struct elf_aarch64_link_hash_entry));
2726 if (ret == NULL)
2727 return (struct bfd_hash_entry *) ret;
2728
2729 /* Call the allocation method of the superclass. */
2730 ret = ((struct elf_aarch64_link_hash_entry *)
2731 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2732 table, string));
2733 if (ret != NULL)
2734 {
2735 ret->got_type = GOT_UNKNOWN;
2736 ret->def_protected = 0;
2737 ret->plt_got_offset = (bfd_vma) - 1;
2738 ret->stub_cache = NULL;
2739 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2740 }
2741
2742 return (struct bfd_hash_entry *) ret;
2743 }
2744
2745 /* Initialize an entry in the stub hash table. */
2746
2747 static struct bfd_hash_entry *
2748 stub_hash_newfunc (struct bfd_hash_entry *entry,
2749 struct bfd_hash_table *table, const char *string)
2750 {
2751 /* Allocate the structure if it has not already been allocated by a
2752 subclass. */
2753 if (entry == NULL)
2754 {
2755 entry = bfd_hash_allocate (table,
2756 sizeof (struct
2757 elf_aarch64_stub_hash_entry));
2758 if (entry == NULL)
2759 return entry;
2760 }
2761
2762 /* Call the allocation method of the superclass. */
2763 entry = bfd_hash_newfunc (entry, table, string);
2764 if (entry != NULL)
2765 {
2766 struct elf_aarch64_stub_hash_entry *eh;
2767
2768 /* Initialize the local fields. */
2769 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2770 eh->adrp_offset = 0;
2771 eh->stub_sec = NULL;
2772 eh->stub_offset = 0;
2773 eh->target_value = 0;
2774 eh->target_section = NULL;
2775 eh->stub_type = aarch64_stub_none;
2776 eh->h = NULL;
2777 eh->id_sec = NULL;
2778 }
2779
2780 return entry;
2781 }
2782
2783 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2784 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2785 as global symbols. We reuse indx and dynstr_index for the local symbol
2786 hash since they aren't used by global symbols in this backend. */
2787
2788 static hashval_t
2789 elfNN_aarch64_local_htab_hash (const void *ptr)
2790 {
2791 struct elf_link_hash_entry *h
2792 = (struct elf_link_hash_entry *) ptr;
2793 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2794 }
2795
2796 /* Compare local hash entries. */
2797
2798 static int
2799 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2800 {
2801 struct elf_link_hash_entry *h1
2802 = (struct elf_link_hash_entry *) ptr1;
2803 struct elf_link_hash_entry *h2
2804 = (struct elf_link_hash_entry *) ptr2;
2805
2806 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2807 }
2808
2809 /* Find and/or create a hash entry for a local symbol. */
2810
2811 static struct elf_link_hash_entry *
2812 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2813 bfd *abfd, const Elf_Internal_Rela *rel,
2814 bool create)
2815 {
2816 struct elf_aarch64_link_hash_entry e, *ret;
2817 asection *sec = abfd->sections;
2818 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2819 ELFNN_R_SYM (rel->r_info));
2820 void **slot;
2821
2822 e.root.indx = sec->id;
2823 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2824 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2825 create ? INSERT : NO_INSERT);
2826
2827 if (!slot)
2828 return NULL;
2829
2830 if (*slot)
2831 {
2832 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2833 return &ret->root;
2834 }
2835
2836 ret = (struct elf_aarch64_link_hash_entry *)
2837 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2838 sizeof (struct elf_aarch64_link_hash_entry));
2839 if (ret)
2840 {
2841 memset (ret, 0, sizeof (*ret));
2842 ret->root.indx = sec->id;
2843 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2844 ret->root.dynindx = -1;
2845 *slot = ret;
2846 }
2847 return &ret->root;
2848 }
2849
2850 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2851
2852 static void
2853 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2854 struct elf_link_hash_entry *dir,
2855 struct elf_link_hash_entry *ind)
2856 {
2857 struct elf_aarch64_link_hash_entry *edir, *eind;
2858
2859 edir = (struct elf_aarch64_link_hash_entry *) dir;
2860 eind = (struct elf_aarch64_link_hash_entry *) ind;
2861
2862 if (ind->root.type == bfd_link_hash_indirect)
2863 {
2864 /* Copy over PLT info. */
2865 if (dir->got.refcount <= 0)
2866 {
2867 edir->got_type = eind->got_type;
2868 eind->got_type = GOT_UNKNOWN;
2869 }
2870 }
2871
2872 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2873 }
2874
2875 /* Merge non-visibility st_other attributes. */
2876
2877 static void
2878 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
2879 unsigned int st_other,
2880 bool definition,
2881 bool dynamic ATTRIBUTE_UNUSED)
2882 {
2883 if (definition)
2884 {
2885 struct elf_aarch64_link_hash_entry *eh
2886 = (struct elf_aarch64_link_hash_entry *)h;
2887 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED;
2888 }
2889
2890 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1);
2891 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
2892
2893 if (isym_sto == h_sto)
2894 return;
2895
2896 if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
2897 /* Not fatal, this callback cannot fail. */
2898 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
2899 h->root.root.string, isym_sto);
2900
2901 /* Note: Ideally we would warn about any attribute mismatch, but
2902 this api does not allow that without substantial changes. */
2903 if (isym_sto & STO_AARCH64_VARIANT_PCS)
2904 h->other |= STO_AARCH64_VARIANT_PCS;
2905 }
2906
2907 /* Destroy an AArch64 elf linker hash table. */
2908
2909 static void
2910 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2911 {
2912 struct elf_aarch64_link_hash_table *ret
2913 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2914
2915 if (ret->loc_hash_table)
2916 htab_delete (ret->loc_hash_table);
2917 if (ret->loc_hash_memory)
2918 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2919
2920 bfd_hash_table_free (&ret->stub_hash_table);
2921 _bfd_elf_link_hash_table_free (obfd);
2922 }
2923
2924 /* Create an AArch64 elf linker hash table. */
2925
2926 static struct bfd_link_hash_table *
2927 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2928 {
2929 struct elf_aarch64_link_hash_table *ret;
2930 size_t amt = sizeof (struct elf_aarch64_link_hash_table);
2931
2932 ret = bfd_zmalloc (amt);
2933 if (ret == NULL)
2934 return NULL;
2935
2936 if (!_bfd_elf_link_hash_table_init
2937 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2938 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2939 {
2940 free (ret);
2941 return NULL;
2942 }
2943
2944 ret->plt_header_size = PLT_ENTRY_SIZE;
2945 ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
2946 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2947 ret->plt_entry = elfNN_aarch64_small_plt_entry;
2948 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
2949 ret->obfd = abfd;
2950 ret->root.tlsdesc_got = (bfd_vma) - 1;
2951
2952 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2953 sizeof (struct elf_aarch64_stub_hash_entry)))
2954 {
2955 _bfd_elf_link_hash_table_free (abfd);
2956 return NULL;
2957 }
2958
2959 ret->loc_hash_table = htab_try_create (1024,
2960 elfNN_aarch64_local_htab_hash,
2961 elfNN_aarch64_local_htab_eq,
2962 NULL);
2963 ret->loc_hash_memory = objalloc_create ();
2964 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2965 {
2966 elfNN_aarch64_link_hash_table_free (abfd);
2967 return NULL;
2968 }
2969 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2970
2971 return &ret->root.root;
2972 }
2973
2974 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
2975
2976 static bool
2977 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2978 bfd_vma offset, bfd_vma value)
2979 {
2980 reloc_howto_type *howto;
2981 bfd_vma place;
2982
2983 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
2984 place = (input_section->output_section->vma + input_section->output_offset
2985 + offset);
2986
2987 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
2988 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
2989 value, 0, false);
2990 return _bfd_aarch64_elf_put_addend (input_bfd,
2991 input_section->contents + offset, r_type,
2992 howto, value) == bfd_reloc_ok;
2993 }
2994
2995 /* Determine the type of stub needed, if any, for a call. */
2996
2997 static enum elf_aarch64_stub_type
2998 aarch64_type_of_stub (asection *input_sec,
2999 const Elf_Internal_Rela *rel,
3000 asection *sym_sec,
3001 unsigned char st_type,
3002 bfd_vma destination)
3003 {
3004 bfd_vma location;
3005 bfd_signed_vma branch_offset;
3006 unsigned int r_type;
3007 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
3008
3009 if (st_type != STT_FUNC
3010 && (sym_sec == input_sec))
3011 return stub_type;
3012
3013 /* Determine where the call point is. */
3014 location = (input_sec->output_offset
3015 + input_sec->output_section->vma + rel->r_offset);
3016
3017 branch_offset = (bfd_signed_vma) (destination - location);
3018
3019 r_type = ELFNN_R_TYPE (rel->r_info);
3020
3021 /* We don't want to redirect any old unconditional jump in this way,
3022 only one which is being used for a sibcall, where it is
3023 acceptable for the IP0 and IP1 registers to be clobbered. */
3024 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
3025 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
3026 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
3027 {
3028 stub_type = aarch64_stub_long_branch;
3029 }
3030
3031 return stub_type;
3032 }
3033
3034 /* Build a name for an entry in the stub hash table. */
3035
3036 static char *
3037 elfNN_aarch64_stub_name (const asection *input_section,
3038 const asection *sym_sec,
3039 const struct elf_aarch64_link_hash_entry *hash,
3040 const Elf_Internal_Rela *rel)
3041 {
3042 char *stub_name;
3043 bfd_size_type len;
3044
3045 if (hash)
3046 {
3047 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
3048 stub_name = bfd_malloc (len);
3049 if (stub_name != NULL)
3050 snprintf (stub_name, len, "%08x_%s+%" PRIx64,
3051 (unsigned int) input_section->id,
3052 hash->root.root.root.string,
3053 (uint64_t) rel->r_addend);
3054 }
3055 else
3056 {
3057 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
3058 stub_name = bfd_malloc (len);
3059 if (stub_name != NULL)
3060 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
3061 (unsigned int) input_section->id,
3062 (unsigned int) sym_sec->id,
3063 (unsigned int) ELFNN_R_SYM (rel->r_info),
3064 (uint64_t) rel->r_addend);
3065 }
3066
3067 return stub_name;
3068 }
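/* Example of the names this produces (the section ids, symbol index and
   addend are invented): a global symbol `memcpy' referenced from the
   section with id 0x25 yields "00000025_memcpy+0", while a local symbol
   uses the "<section id>_<sym section id>:<sym index>+<addend>" form,
   e.g. "00000025_1a:7+0".  */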
3069
3070 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. For
3071 executable PLT slots where the executable never takes the address of those
3072 functions, the function symbols are not added to the hash table. */
3073
3074 static bool
3075 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
3076 {
3077 if (h->plt.offset != (bfd_vma) -1
3078 && !h->def_regular
3079 && !h->pointer_equality_needed)
3080 return false;
3081
3082 return _bfd_elf_hash_symbol (h);
3083 }
3084
3085
3086 /* Look up an entry in the stub hash. Stub entries are cached because
3087 creating the stub name takes a bit of time. */
3088
3089 static struct elf_aarch64_stub_hash_entry *
3090 elfNN_aarch64_get_stub_entry (const asection *input_section,
3091 const asection *sym_sec,
3092 struct elf_link_hash_entry *hash,
3093 const Elf_Internal_Rela *rel,
3094 struct elf_aarch64_link_hash_table *htab)
3095 {
3096 struct elf_aarch64_stub_hash_entry *stub_entry;
3097 struct elf_aarch64_link_hash_entry *h =
3098 (struct elf_aarch64_link_hash_entry *) hash;
3099 const asection *id_sec;
3100
3101 if ((input_section->flags & SEC_CODE) == 0)
3102 return NULL;
3103
3104 /* If this input section is part of a group of sections sharing one
3105 stub section, then use the id of the first section in the group.
3106 Stub names need to include a section id, as there may well be
3107 more than one stub used to reach say, printf, and we need to
3108 distinguish between them. */
3109 id_sec = htab->stub_group[input_section->id].link_sec;
3110
3111 if (h != NULL && h->stub_cache != NULL
3112 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
3113 {
3114 stub_entry = h->stub_cache;
3115 }
3116 else
3117 {
3118 char *stub_name;
3119
3120 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
3121 if (stub_name == NULL)
3122 return NULL;
3123
3124 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3125 stub_name, false, false);
3126 if (h != NULL)
3127 h->stub_cache = stub_entry;
3128
3129 free (stub_name);
3130 }
3131
3132 return stub_entry;
3133 }
3134
3135
3136 /* Create a stub section. */
3137
3138 static asection *
3139 _bfd_aarch64_create_stub_section (asection *section,
3140 struct elf_aarch64_link_hash_table *htab)
3141 {
3142 size_t namelen;
3143 bfd_size_type len;
3144 char *s_name;
3145
3146 namelen = strlen (section->name);
3147 len = namelen + sizeof (STUB_SUFFIX);
3148 s_name = bfd_alloc (htab->stub_bfd, len);
3149 if (s_name == NULL)
3150 return NULL;
3151
3152 memcpy (s_name, section->name, namelen);
3153 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3154 return (*htab->add_stub_section) (s_name, section);
3155 }
3156
3157
3158 /* Find or create a stub section for a link section.
3159
3160 Fix or create the stub section used to collect stubs attached to
3161 the specified link section. */
3162
3163 static asection *
3164 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
3165 struct elf_aarch64_link_hash_table *htab)
3166 {
3167 if (htab->stub_group[link_section->id].stub_sec == NULL)
3168 htab->stub_group[link_section->id].stub_sec
3169 = _bfd_aarch64_create_stub_section (link_section, htab);
3170 return htab->stub_group[link_section->id].stub_sec;
3171 }
3172
3173
3174 /* Find or create a stub section in the stub group for an input
3175 section. */
3176
3177 static asection *
3178 _bfd_aarch64_create_or_find_stub_sec (asection *section,
3179 struct elf_aarch64_link_hash_table *htab)
3180 {
3181 asection *link_sec = htab->stub_group[section->id].link_sec;
3182 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
3183 }
3184
3185
3186 /* Add a new stub entry in the stub group associated with an input
3187 section to the stub hash. Not all fields of the new stub entry are
3188 initialised. */
3189
3190 static struct elf_aarch64_stub_hash_entry *
3191 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
3192 asection *section,
3193 struct elf_aarch64_link_hash_table *htab)
3194 {
3195 asection *link_sec;
3196 asection *stub_sec;
3197 struct elf_aarch64_stub_hash_entry *stub_entry;
3198
3199 link_sec = htab->stub_group[section->id].link_sec;
3200 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
3201
3202 /* Enter this entry into the linker stub hash table. */
3203 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3204 true, false);
3205 if (stub_entry == NULL)
3206 {
3207 /* xgettext:c-format */
3208 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
3209 section->owner, stub_name);
3210 return NULL;
3211 }
3212
3213 stub_entry->stub_sec = stub_sec;
3214 stub_entry->stub_offset = 0;
3215 stub_entry->id_sec = link_sec;
3216
3217 return stub_entry;
3218 }
3219
3220 /* Add a new stub entry in the final stub section to the stub hash.
3221 Not all fields of the new stub entry are initialised. */
3222
3223 static struct elf_aarch64_stub_hash_entry *
3224 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
3225 asection *link_section,
3226 struct elf_aarch64_link_hash_table *htab)
3227 {
3228 asection *stub_sec;
3229 struct elf_aarch64_stub_hash_entry *stub_entry;
3230
3231 stub_sec = NULL;
3232 /* Only create the actual stub if we will end up needing it. */
3233 if (htab->fix_erratum_843419 & ERRAT_ADRP)
3234 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
3235 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3236 true, false);
3237 if (stub_entry == NULL)
3238 {
3239 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
3240 return NULL;
3241 }
3242
3243 stub_entry->stub_sec = stub_sec;
3244 stub_entry->stub_offset = 0;
3245 stub_entry->id_sec = link_section;
3246
3247 return stub_entry;
3248 }
3249
3250
3251 static bool
3252 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
3253 void *in_arg)
3254 {
3255 struct elf_aarch64_stub_hash_entry *stub_entry;
3256 asection *stub_sec;
3257 bfd *stub_bfd;
3258 bfd_byte *loc;
3259 bfd_vma sym_value;
3260 bfd_vma veneered_insn_loc;
3261 bfd_vma veneer_entry_loc;
3262 bfd_signed_vma branch_offset = 0;
3263 unsigned int template_size;
3264 unsigned int pad_size = 0;
3265 const uint32_t *template;
3266 unsigned int i;
3267 struct bfd_link_info *info;
3268 struct elf_aarch64_link_hash_table *htab;
3269
3270 /* Massage our args to the form they really have. */
3271 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3272
3273 info = (struct bfd_link_info *) in_arg;
3274 htab = elf_aarch64_hash_table (info);
3275
3276 /* Fail if the target section could not be assigned to an output
3277 section. The user should fix his linker script. */
3278 if (stub_entry->target_section->output_section == NULL
3279 && info->non_contiguous_regions)
3280 info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
3281 "Retry without "
3282 "--enable-non-contiguous-regions.\n"),
3283 stub_entry->target_section);
3284
3285 stub_sec = stub_entry->stub_sec;
3286
3287 /* The layout must not change when a stub may be the target of another. */
3288 if (htab->has_double_stub)
3289 BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);
3290
3291 /* Make a note of the offset within the stubs for this entry. */
3292 stub_entry->stub_offset = stub_sec->size;
3293 loc = stub_sec->contents + stub_entry->stub_offset;
3294
3295 stub_bfd = stub_sec->owner;
3296
3297 /* This is the address of the stub destination. */
3298 sym_value = (stub_entry->target_value
3299 + stub_entry->target_section->output_offset
3300 + stub_entry->target_section->output_section->vma);
3301
3302 if (stub_entry->stub_type == aarch64_stub_long_branch)
3303 {
3304 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
3305 + stub_sec->output_offset);
3306
3307 /* See if we can relax the stub. */
3308 if (aarch64_valid_for_adrp_p (sym_value, place))
3309 {
3310 stub_entry->stub_type = aarch64_stub_adrp_branch;
3311
3312 /* Avoid the relaxation changing the layout. */
3313 if (htab->has_double_stub)
3314 pad_size = sizeof (aarch64_long_branch_stub)
3315 - sizeof (aarch64_adrp_branch_stub);
3316 }
3317 }
3318
3319 switch (stub_entry->stub_type)
3320 {
3321 case aarch64_stub_adrp_branch:
3322 template = aarch64_adrp_branch_stub;
3323 template_size = sizeof (aarch64_adrp_branch_stub);
3324 break;
3325 case aarch64_stub_long_branch:
3326 template = aarch64_long_branch_stub;
3327 template_size = sizeof (aarch64_long_branch_stub);
3328 break;
3329 case aarch64_stub_bti_direct_branch:
3330 template = aarch64_bti_direct_branch_stub;
3331 template_size = sizeof (aarch64_bti_direct_branch_stub);
3332 break;
3333 case aarch64_stub_erratum_835769_veneer:
3334 template = aarch64_erratum_835769_stub;
3335 template_size = sizeof (aarch64_erratum_835769_stub);
3336 break;
3337 case aarch64_stub_erratum_843419_veneer:
3338 template = aarch64_erratum_843419_stub;
3339 template_size = sizeof (aarch64_erratum_843419_stub);
3340 break;
3341 default:
3342 abort ();
3343 }
3344
3345 for (i = 0; i < (template_size / sizeof template[0]); i++)
3346 {
3347 bfd_putl32 (template[i], loc);
3348 loc += 4;
3349 }
3350
3351 template_size += pad_size;
3352 template_size = (template_size + 7) & ~7;
3353 stub_sec->size += template_size;
3354
3355 switch (stub_entry->stub_type)
3356 {
3357 case aarch64_stub_adrp_branch:
3358 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
3359 stub_entry->stub_offset, sym_value))
3360 /* The stub would not have been relaxed if the offset was out
3361 of range. */
3362 BFD_FAIL ();
3363
3364 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
3365 stub_entry->stub_offset + 4, sym_value))
3366 BFD_FAIL ();
3367 break;
3368
3369 case aarch64_stub_long_branch:
3370 /* We want the value relative to the address 12 bytes back from the
3371 value itself. */
3372 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
3373 stub_entry->stub_offset + 16, sym_value + 12))
3374 BFD_FAIL ();
3375 break;
3376
3377 case aarch64_stub_bti_direct_branch:
3378 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3379 stub_entry->stub_offset + 4, sym_value))
3380 BFD_FAIL ();
3381 break;
3382
3383 case aarch64_stub_erratum_835769_veneer:
3384 veneered_insn_loc = stub_entry->target_section->output_section->vma
3385 + stub_entry->target_section->output_offset
3386 + stub_entry->target_value;
3387 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3388 + stub_entry->stub_sec->output_offset
3389 + stub_entry->stub_offset;
3390 branch_offset = veneered_insn_loc - veneer_entry_loc;
3391 branch_offset >>= 2;
3392 branch_offset &= 0x3ffffff;
3393 bfd_putl32 (stub_entry->veneered_insn,
3394 stub_sec->contents + stub_entry->stub_offset);
3395 bfd_putl32 (template[1] | branch_offset,
3396 stub_sec->contents + stub_entry->stub_offset + 4);
3397 break;
3398
3399 case aarch64_stub_erratum_843419_veneer:
3400 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3401 stub_entry->stub_offset + 4, sym_value + 4))
3402 BFD_FAIL ();
3403 break;
3404
3405 default:
3406 abort ();
3407 }
3408
3409 return true;
3410 }
3411
3412 /* As above, but don't actually build the stub. Just bump offset so
3413 we know stub section sizes and record the offset for each stub so
3414 a stub can target another stub (needed for BTI direct branch stub). */
3415
3416 static bool
3417 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3418 {
3419 struct elf_aarch64_stub_hash_entry *stub_entry;
3420 struct elf_aarch64_link_hash_table *htab;
3421 int size;
3422
3423 /* Massage our args to the form they really have. */
3424 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3425 htab = (struct elf_aarch64_link_hash_table *) in_arg;
3426
3427 switch (stub_entry->stub_type)
3428 {
3429 case aarch64_stub_adrp_branch:
3430 size = sizeof (aarch64_adrp_branch_stub);
3431 break;
3432 case aarch64_stub_long_branch:
3433 size = sizeof (aarch64_long_branch_stub);
3434 break;
3435 case aarch64_stub_bti_direct_branch:
3436 size = sizeof (aarch64_bti_direct_branch_stub);
3437 break;
3438 case aarch64_stub_erratum_835769_veneer:
3439 size = sizeof (aarch64_erratum_835769_stub);
3440 break;
3441 case aarch64_stub_erratum_843419_veneer:
3442 {
3443 if (htab->fix_erratum_843419 == ERRAT_ADR)
3444 return true;
3445 size = sizeof (aarch64_erratum_843419_stub);
3446 }
3447 break;
3448 default:
3449 abort ();
3450 }
3451
3452 size = (size + 7) & ~7;
3453 stub_entry->stub_offset = stub_entry->stub_sec->size;
3454 stub_entry->stub_sec->size += size;
3455 return true;
3456 }
3457
3458 /* Output is BTI compatible. */
3459
3460 static bool
3461 elf_aarch64_bti_p (bfd *output_bfd)
3462 {
3463 uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
3464 return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
3465 }
3466
3467 /* External entry points for sizing and building linker stubs. */
3468
3469 /* Set up various things so that we can make a list of input sections
3470 for each output section included in the link. Returns -1 on error,
3471 0 when no stubs will be needed, and 1 on success. */
3472
3473 int
3474 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3475 struct bfd_link_info *info)
3476 {
3477 bfd *input_bfd;
3478 unsigned int bfd_count;
3479 unsigned int top_id, top_index;
3480 asection *section;
3481 asection **input_list, **list;
3482 size_t amt;
3483 struct elf_aarch64_link_hash_table *htab =
3484 elf_aarch64_hash_table (info);
3485
3486 if (!is_elf_hash_table (&htab->root.root))
3487 return 0;
3488
3489 /* Count the number of input BFDs and find the top input section id. */
3490 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3491 input_bfd != NULL; input_bfd = input_bfd->link.next)
3492 {
3493 bfd_count += 1;
3494 for (section = input_bfd->sections;
3495 section != NULL; section = section->next)
3496 {
3497 if (top_id < section->id)
3498 top_id = section->id;
3499 }
3500 }
3501 htab->bfd_count = bfd_count;
3502
3503 amt = sizeof (struct map_stub) * (top_id + 1);
3504 htab->stub_group = bfd_zmalloc (amt);
3505 if (htab->stub_group == NULL)
3506 return -1;
3507
3508 /* We can't use output_bfd->section_count here to find the top output
3509 section index as some sections may have been removed, and
3510 _bfd_strip_section_from_output doesn't renumber the indices. */
3511 for (section = output_bfd->sections, top_index = 0;
3512 section != NULL; section = section->next)
3513 {
3514 if (top_index < section->index)
3515 top_index = section->index;
3516 }
3517
3518 htab->top_index = top_index;
3519 amt = sizeof (asection *) * (top_index + 1);
3520 input_list = bfd_malloc (amt);
3521 htab->input_list = input_list;
3522 if (input_list == NULL)
3523 return -1;
3524
3525 /* For sections we aren't interested in, mark their entries with a
3526 value we can check later. */
3527 list = input_list + top_index;
3528 do
3529 *list = bfd_abs_section_ptr;
3530 while (list-- != input_list);
3531
3532 for (section = output_bfd->sections;
3533 section != NULL; section = section->next)
3534 {
3535 if ((section->flags & SEC_CODE) != 0)
3536 input_list[section->index] = NULL;
3537 }
3538
3539 return 1;
3540 }
3541
3542 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3543 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3544
3545 /* The linker repeatedly calls this function for each input section,
3546 in the order that input sections are linked into output sections.
3547 Build lists of input sections to determine groupings between which
3548 we may insert linker stubs. */
3549
3550 void
3551 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3552 {
3553 struct elf_aarch64_link_hash_table *htab =
3554 elf_aarch64_hash_table (info);
3555
3556 if (isec->output_section->index <= htab->top_index)
3557 {
3558 asection **list = htab->input_list + isec->output_section->index;
3559
3560 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3561 {
3562 /* Steal the link_sec pointer for our list. */
3563 /* This happens to make the list in reverse order,
3564 which is what we want. */
3565 PREV_SEC (isec) = *list;
3566 *list = isec;
3567 }
3568 }
3569 }
3570
3571 /* See whether we can group stub sections together. Grouping stub
3572 sections may result in fewer stubs. More importantly, we need to
3573 put all .init* and .fini* stubs at the beginning of the .init or
3574 .fini output sections respectively, because glibc splits the
3575 _init and _fini functions into multiple parts. Putting a stub in
3576 the middle of a function is not a good idea. */
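/* For illustration, with the default group size (a little under the
   +/-128MB branch range, see elfNN_aarch64_size_stubs below),
   consecutive code input sections are accumulated into one group until
   the distance from the start of the group would exceed that size;
   every section in a group records the same link_sec, and stubs for
   branches out of any of them are placed in that group's stub
   section.  */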
3577
3578 static void
3579 group_sections (struct elf_aarch64_link_hash_table *htab,
3580 bfd_size_type stub_group_size,
3581 bool stubs_always_after_branch)
3582 {
3583 asection **list = htab->input_list;
3584
3585 do
3586 {
3587 asection *tail = *list;
3588 asection *head;
3589
3590 if (tail == bfd_abs_section_ptr)
3591 continue;
3592
3593 /* Reverse the list: we must avoid placing stubs at the
3594 beginning of the section because the beginning of the text
3595 section may be required for an interrupt vector in bare metal
3596 code. */
3597 #define NEXT_SEC PREV_SEC
3598 head = NULL;
3599 while (tail != NULL)
3600 {
3601 /* Pop from tail. */
3602 asection *item = tail;
3603 tail = PREV_SEC (item);
3604
3605 /* Push on head. */
3606 NEXT_SEC (item) = head;
3607 head = item;
3608 }
3609
3610 while (head != NULL)
3611 {
3612 asection *curr;
3613 asection *next;
3614 bfd_vma stub_group_start = head->output_offset;
3615 bfd_vma end_of_next;
3616
3617 curr = head;
3618 while (NEXT_SEC (curr) != NULL)
3619 {
3620 next = NEXT_SEC (curr);
3621 end_of_next = next->output_offset + next->size;
3622 if (end_of_next - stub_group_start >= stub_group_size)
3623 /* End of NEXT is too far from start, so stop. */
3624 break;
3625 /* Add NEXT to the group. */
3626 curr = next;
3627 }
3628
3629 /* OK, the size from the start to the start of CURR is less
3630 than stub_group_size and thus can be handled by one stub
3631 section. (Or the head section is itself larger than
3632 stub_group_size, in which case we may be toast.)
3633 We should really be keeping track of the total size of
3634 stubs added here, as stubs contribute to the final output
3635 section size. */
3636 do
3637 {
3638 next = NEXT_SEC (head);
3639 /* Set up this stub group. */
3640 htab->stub_group[head->id].link_sec = curr;
3641 }
3642 while (head != curr && (head = next) != NULL);
3643
3644 /* But wait, there's more! Input sections up to stub_group_size
3645 bytes after the stub section can be handled by it too. */
3646 if (!stubs_always_after_branch)
3647 {
3648 stub_group_start = curr->output_offset + curr->size;
3649
3650 while (next != NULL)
3651 {
3652 end_of_next = next->output_offset + next->size;
3653 if (end_of_next - stub_group_start >= stub_group_size)
3654 /* End of NEXT is too far from stubs, so stop. */
3655 break;
3656 /* Add NEXT to the stub group. */
3657 head = next;
3658 next = NEXT_SEC (head);
3659 htab->stub_group[head->id].link_sec = curr;
3660 }
3661 }
3662 head = next;
3663 }
3664 }
3665 while (list++ != htab->input_list + htab->top_index);
3666
3667 free (htab->input_list);
3668 }
3669
3670 #undef PREV_SEC
3671 #undef NEXT_SEC
3672
3673 #define AARCH64_HINT(insn) (((insn) & 0xfffff01f) == 0xd503201f)
3674 #define AARCH64_PACIASP 0xd503233f
3675 #define AARCH64_PACIBSP 0xd503237f
3676 #define AARCH64_BTI_C 0xd503245f
3677 #define AARCH64_BTI_J 0xd503249f
3678 #define AARCH64_BTI_JC 0xd50324df
3679
3680 /* True if the inserted stub does not break BTI compatibility. */
3681
3682 static bool
3683 aarch64_bti_stub_p (bfd *input_bfd,
3684 struct elf_aarch64_stub_hash_entry *stub_entry)
3685 {
3686 /* Stubs without an indirect branch are BTI compatible. */
3687 if (stub_entry->stub_type != aarch64_stub_adrp_branch
3688 && stub_entry->stub_type != aarch64_stub_long_branch)
3689 return true;
3690
3691 /* Return true if the target instruction is compatible with BR x16. */
3692
3693 asection *section = stub_entry->target_section;
3694 bfd_byte loc[4];
3695 file_ptr off = stub_entry->target_value;
3696 bfd_size_type count = sizeof (loc);
3697
3698 if (!bfd_get_section_contents (input_bfd, section, loc, off, count))
3699 return false;
3700
3701 uint32_t insn = bfd_getl32 (loc);
3702 if (!AARCH64_HINT (insn))
3703 return false;
3704 return insn == AARCH64_BTI_C
3705 || insn == AARCH64_PACIASP
3706 || insn == AARCH64_BTI_JC
3707 || insn == AARCH64_BTI_J
3708 || insn == AARCH64_PACIBSP;
3709 }
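/* For example, an indirect-branch stub whose target starts with
   "bti c" (or "paciasp") already provides a valid landing pad for the
   stub's "br x16", so no extra stub is needed; a target that starts
   with, say, "stp x29, x30, [sp, #-16]!" does not, and is instead
   reached via an aarch64_stub_bti_direct_branch stub placed near the
   target (see _bfd_aarch64_add_call_stub_entries below).  */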
3710
3711 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3712
3713 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3714 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3715 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3716 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3717 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3718 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3719
3720 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3721 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3722 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3723 #define AARCH64_ZR 0x1f
3724
3725 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3726 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
3727
3728 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3729 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3730 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3731 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3732 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3733 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3734 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3735 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3736 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3737 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3738 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3739 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3740 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3741 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3742 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3743 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3744 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3745 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3746
3747 /* Classify an INSN if it is indeed a load/store.
3748
3749 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
3750
3751 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3752 is set equal to RT.
3753
3754 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
3755
3756 static bool
3757 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3758 bool *pair, bool *load)
3759 {
3760 uint32_t opcode;
3761 unsigned int r;
3762 uint32_t opc = 0;
3763 uint32_t v = 0;
3764 uint32_t opc_v = 0;
3765
3766 /* Bail out quickly if INSN doesn't fall into the load-store
3767 encoding space. */
3768 if (!AARCH64_LDST (insn))
3769 return false;
3770
3771 *pair = false;
3772 *load = false;
3773 if (AARCH64_LDST_EX (insn))
3774 {
3775 *rt = AARCH64_RT (insn);
3776 *rt2 = *rt;
3777 if (AARCH64_BIT (insn, 21) == 1)
3778 {
3779 *pair = true;
3780 *rt2 = AARCH64_RT2 (insn);
3781 }
3782 *load = AARCH64_LD (insn);
3783 return true;
3784 }
3785 else if (AARCH64_LDST_NAP (insn)
3786 || AARCH64_LDSTP_PI (insn)
3787 || AARCH64_LDSTP_O (insn)
3788 || AARCH64_LDSTP_PRE (insn))
3789 {
3790 *pair = true;
3791 *rt = AARCH64_RT (insn);
3792 *rt2 = AARCH64_RT2 (insn);
3793 *load = AARCH64_LD (insn);
3794 return true;
3795 }
3796 else if (AARCH64_LDST_PCREL (insn)
3797 || AARCH64_LDST_UI (insn)
3798 || AARCH64_LDST_PIIMM (insn)
3799 || AARCH64_LDST_U (insn)
3800 || AARCH64_LDST_PREIMM (insn)
3801 || AARCH64_LDST_RO (insn)
3802 || AARCH64_LDST_UIMM (insn))
3803 {
3804 *rt = AARCH64_RT (insn);
3805 *rt2 = *rt;
3806 if (AARCH64_LDST_PCREL (insn))
3807 *load = true;
3808 opc = AARCH64_BITS (insn, 22, 2);
3809 v = AARCH64_BIT (insn, 26);
3810 opc_v = opc | (v << 2);
3811 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3812 || opc_v == 5 || opc_v == 7);
3813 return true;
3814 }
3815 else if (AARCH64_LDST_SIMD_M (insn)
3816 || AARCH64_LDST_SIMD_M_PI (insn))
3817 {
3818 *rt = AARCH64_RT (insn);
3819 *load = AARCH64_BIT (insn, 22);
3820 opcode = (insn >> 12) & 0xf;
3821 switch (opcode)
3822 {
3823 case 0:
3824 case 2:
3825 *rt2 = *rt + 3;
3826 break;
3827
3828 case 4:
3829 case 6:
3830 *rt2 = *rt + 2;
3831 break;
3832
3833 case 7:
3834 *rt2 = *rt;
3835 break;
3836
3837 case 8:
3838 case 10:
3839 *rt2 = *rt + 1;
3840 break;
3841
3842 default:
3843 return false;
3844 }
3845 return true;
3846 }
3847 else if (AARCH64_LDST_SIMD_S (insn)
3848 || AARCH64_LDST_SIMD_S_PI (insn))
3849 {
3850 *rt = AARCH64_RT (insn);
3851 r = (insn >> 21) & 1;
3852 *load = AARCH64_BIT (insn, 22);
3853 opcode = (insn >> 13) & 0x7;
3854 switch (opcode)
3855 {
3856 case 0:
3857 case 2:
3858 case 4:
3859 *rt2 = *rt + r;
3860 break;
3861
3862 case 1:
3863 case 3:
3864 case 5:
3865 *rt2 = *rt + (r == 0 ? 2 : 3);
3866 break;
3867
3868 case 6:
3869 *rt2 = *rt + r;
3870 break;
3871
3872 case 7:
3873 *rt2 = *rt + (r == 0 ? 2 : 3);
3874 break;
3875
3876 default:
3877 return false;
3878 }
3879 return true;
3880 }
3881
3882 return false;
3883 }
3884
3885 /* Return TRUE if INSN is multiply-accumulate. */
3886
3887 static bool
3888 aarch64_mlxl_p (uint32_t insn)
3889 {
3890 uint32_t op31 = AARCH64_OP31 (insn);
3891
3892 if (AARCH64_MAC (insn)
3893 && (op31 == 0 || op31 == 1 || op31 == 5)
3894 /* Exclude MUL instructions which are encoded as a multiply-accumulate
3895 with RA = XZR. */
3896 && AARCH64_RA (insn) != AARCH64_ZR)
3897 return true;
3898
3899 return false;
3900 }
3901
3902 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3903 it is possible for a 64-bit multiply-accumulate instruction to generate an
3904 incorrect result. The details are quite complex and hard to
3905 determine statically, since branches in the code may exist in some
3906 circumstances, but all cases end with a memory (load, store, or
3907 prefetch) instruction followed immediately by the multiply-accumulate
3908 operation. We employ a linker patching technique, by moving the potentially
3909 affected multiply-accumulate instruction into a patch region and replacing
3910 the original instruction with a branch to the patch. This function checks
3911 if INSN_1 is the memory operation followed by a multiply-accumulate
3912 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3913 if INSN_1 and INSN_2 are safe. */
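/* For illustration (hypothetical instructions), a flagged sequence
   looks like:

	ldr	x2, [x10]		<- memory operation (INSN_1)
	madd	x0, x1, x3, x4		<- 64-bit multiply-accumulate (INSN_2)

   whereas

	ldr	x1, [x10]
	madd	x0, x1, x3, x4

   is treated as safe, because the load writes x1 which the MADD then
   reads (a true dependency).  */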
3914
3915 static bool
3916 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3917 {
3918 uint32_t rt;
3919 uint32_t rt2;
3920 uint32_t rn;
3921 uint32_t rm;
3922 uint32_t ra;
3923 bool pair;
3924 bool load;
3925
3926 if (aarch64_mlxl_p (insn_2)
3927 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3928 {
3929 /* Any SIMD memory op is independent of the subsequent MLA
3930 by definition of the erratum. */
3931 if (AARCH64_BIT (insn_1, 26))
3932 return true;
3933
3934 /* If not SIMD, check for integer memory ops and MLA relationship. */
3935 rn = AARCH64_RN (insn_2);
3936 ra = AARCH64_RA (insn_2);
3937 rm = AARCH64_RM (insn_2);
3938
3939 /* If this is a load and there's a true (RAW) dependency, we are safe
3940 and this is not an erratum sequence. */
3941 if (load &&
3942 (rt == rn || rt == rm || rt == ra
3943 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3944 return false;
3945
3946 /* We conservatively put out stubs for all other cases (including
3947 writebacks). */
3948 return true;
3949 }
3950
3951 return false;
3952 }
3953
3954 /* Used to order a list of mapping symbols by address. */
3955
3956 static int
3957 elf_aarch64_compare_mapping (const void *a, const void *b)
3958 {
3959 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3960 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3961
3962 if (amap->vma > bmap->vma)
3963 return 1;
3964 else if (amap->vma < bmap->vma)
3965 return -1;
3966 else if (amap->type > bmap->type)
3967 /* Ensure results do not depend on the host qsort for objects with
3968 multiple mapping symbols at the same address by sorting on type
3969 after vma. */
3970 return 1;
3971 else if (amap->type < bmap->type)
3972 return -1;
3973 else
3974 return 0;
3975 }
3976
3977
3978 static char *
3979 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3980 {
3981 char *stub_name = (char *) bfd_malloc
3982 (strlen ("__erratum_835769_veneer_") + 16);
3983 if (stub_name != NULL)
3984 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3985 return stub_name;
3986 }
3987
3988 /* Scan for Cortex-A53 erratum 835769 sequence.
3989
3990 Return TRUE on a successful scan, FALSE on abnormal termination. */
3991
3992 static bool
3993 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3994 struct bfd_link_info *info,
3995 unsigned int *num_fixes_p)
3996 {
3997 asection *section;
3998 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3999 unsigned int num_fixes = *num_fixes_p;
4000
4001 if (htab == NULL)
4002 return true;
4003
4004 for (section = input_bfd->sections;
4005 section != NULL;
4006 section = section->next)
4007 {
4008 bfd_byte *contents = NULL;
4009 struct _aarch64_elf_section_data *sec_data;
4010 unsigned int span;
4011
4012 if (elf_section_type (section) != SHT_PROGBITS
4013 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4014 || (section->flags & SEC_EXCLUDE) != 0
4015 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4016 || (section->output_section == bfd_abs_section_ptr))
4017 continue;
4018
4019 if (elf_section_data (section)->this_hdr.contents != NULL)
4020 contents = elf_section_data (section)->this_hdr.contents;
4021 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4022 return false;
4023
4024 sec_data = elf_aarch64_section_data (section);
4025
4026 if (sec_data->mapcount)
4027 qsort (sec_data->map, sec_data->mapcount,
4028 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4029
4030 for (span = 0; span < sec_data->mapcount; span++)
4031 {
4032 unsigned int span_start = sec_data->map[span].vma;
4033 unsigned int span_end = ((span == sec_data->mapcount - 1)
4034 ? sec_data->map[0].vma + section->size
4035 : sec_data->map[span + 1].vma);
4036 unsigned int i;
4037 char span_type = sec_data->map[span].type;
4038
4039 if (span_type == 'd')
4040 continue;
4041
4042 for (i = span_start; i + 4 < span_end; i += 4)
4043 {
4044 uint32_t insn_1 = bfd_getl32 (contents + i);
4045 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4046
4047 if (aarch64_erratum_sequence (insn_1, insn_2))
4048 {
4049 struct elf_aarch64_stub_hash_entry *stub_entry;
4050 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
4051 if (! stub_name)
4052 return false;
4053
4054 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
4055 section,
4056 htab);
4057 if (! stub_entry)
4058 return false;
4059
4060 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
4061 stub_entry->target_section = section;
4062 stub_entry->target_value = i + 4;
4063 stub_entry->veneered_insn = insn_2;
4064 stub_entry->output_name = stub_name;
4065 num_fixes++;
4066 }
4067 }
4068 }
4069 if (elf_section_data (section)->this_hdr.contents == NULL)
4070 free (contents);
4071 }
4072
4073 *num_fixes_p = num_fixes;
4074
4075 return true;
4076 }
4077
4078
4079 /* Test if instruction INSN is ADRP. */
4080
4081 static bool
4082 _bfd_aarch64_adrp_p (uint32_t insn)
4083 {
4084 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
4085 }
4086
4087
4088 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
4089
4090 static bool
4091 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
4092 uint32_t insn_3)
4093 {
4094 uint32_t rt;
4095 uint32_t rt2;
4096 bool pair;
4097 bool load;
4098
4099 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
4100 && (!pair
4101 || (pair && !load))
4102 && AARCH64_LDST_UIMM (insn_3)
4103 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
4104 }
4105
4106
4107 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
4108
4109 Return TRUE if section CONTENTS at offset I contains one of the
4110 erratum 843419 sequences, otherwise return FALSE. If a sequence is
4111 seen, set P_VENEER_I to the offset of the final LOAD/STORE
4112 instruction in the sequence.
4113 */
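/* For illustration (hypothetical instructions), the shape being matched
   is roughly:

	adrp	x0, sym			<- at an address ending in 0xff8/0xffc
	ldr	x1, [x2, #8]		<- any load/store, but not a load pair
	(optionally one unrelated instruction)
	ldr	x3, [x0, #:lo12:sym]	<- load/store (unsigned immediate)
					   whose base is the ADRP result

   P_VENEER_I is set to the offset of that final load/store, which is the
   instruction moved into the workaround veneer.  */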
4114
4115 static bool
4116 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
4117 bfd_vma i, bfd_vma span_end,
4118 bfd_vma *p_veneer_i)
4119 {
4120 uint32_t insn_1 = bfd_getl32 (contents + i);
4121
4122 if (!_bfd_aarch64_adrp_p (insn_1))
4123 return false;
4124
4125 if (span_end < i + 12)
4126 return false;
4127
4128 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4129 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
4130
4131 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
4132 return false;
4133
4134 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
4135 {
4136 *p_veneer_i = i + 8;
4137 return true;
4138 }
4139
4140 if (span_end < i + 16)
4141 return false;
4142
4143 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
4144
4145 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
4146 {
4147 *p_veneer_i = i + 12;
4148 return true;
4149 }
4150
4151 return false;
4152 }
4153
4154
4155 /* Resize all stub sections. */
4156
4157 static void
4158 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
4159 {
4160 asection *section;
4161
4162 /* OK, we've added some stubs. Find out the new size of the
4163 stub sections. */
4164 for (section = htab->stub_bfd->sections;
4165 section != NULL; section = section->next)
4166 {
4167 /* Ignore non-stub sections. */
4168 if (!strstr (section->name, STUB_SUFFIX))
4169 continue;
4170
4171 /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
4172 as long branch stubs contain a 64-bit address. */
4173 section->size = 8;
4174 }
4175
4176 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
4177
4178 for (section = htab->stub_bfd->sections;
4179 section != NULL; section = section->next)
4180 {
4181 if (!strstr (section->name, STUB_SUFFIX))
4182 continue;
4183
4184 /* Empty stub section. */
4185 if (section->size == 8)
4186 section->size = 0;
4187
4188 /* Ensure all stub sections have a size which is a multiple of
4189 4096. This is important in order to ensure that the insertion
4190 of stub sections does not in itself move existing code around
4191 in such a way that new errata sequences are created. We only do this
4192 when the ADRP workaround is enabled. If only the ADR workaround is
4193 enabled then the stubs workaround won't ever be used. */
4194 if (htab->fix_erratum_843419 & ERRAT_ADRP)
4195 if (section->size)
4196 section->size = BFD_ALIGN (section->size, 0x1000);
4197 }
4198 }
4199
4200 /* Construct an erratum 843419 workaround stub name. */
4201
4202 static char *
4203 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
4204 bfd_vma offset)
4205 {
4206 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
4207 char *stub_name = bfd_malloc (len);
4208
4209 if (stub_name != NULL)
4210 snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
4211 input_section->owner->id,
4212 input_section->id,
4213 (uint64_t) offset);
4214 return stub_name;
4215 }
4216
4217 /* Build a stub_entry structure describing an 843419 fixup.
4218
4219 The stub_entry constructed is populated with the bit pattern INSN
4220 of the instruction located at LDST_OFFSET within input SECTION.
4221
4222 Returns TRUE on success. */
4223
4224 static bool
4225 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
4226 bfd_vma adrp_offset,
4227 bfd_vma ldst_offset,
4228 asection *section,
4229 struct bfd_link_info *info)
4230 {
4231 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4232 char *stub_name;
4233 struct elf_aarch64_stub_hash_entry *stub_entry;
4234
4235 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
4236 if (stub_name == NULL)
4237 return false;
4238 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4239 false, false);
4240 if (stub_entry)
4241 {
4242 free (stub_name);
4243 return true;
4244 }
4245
4246 /* We always place an 843419 workaround veneer in the stub section
4247 attached to the input section in which an erratum sequence has
4248 been found. This ensures that later in the link process (in
4249 elfNN_aarch64_write_section) when we copy the veneered
4250 instruction from the input section into the stub section the
4251 copied instruction will have had any relocations applied to it.
4252 If we placed workaround veneers in any other stub section then we
4253 could not assume that all relocations have been processed on the
4254 corresponding input section at the point we output the stub
4255 section. */
4256
4257 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
4258 if (stub_entry == NULL)
4259 {
4260 free (stub_name);
4261 return false;
4262 }
4263
4264 stub_entry->adrp_offset = adrp_offset;
4265 stub_entry->target_value = ldst_offset;
4266 stub_entry->target_section = section;
4267 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
4268 stub_entry->veneered_insn = insn;
4269 stub_entry->output_name = stub_name;
4270
4271 return true;
4272 }
4273
4274
4275 /* Scan an input section looking for the signature of erratum 843419.
4276
4277 Scans input SECTION in INPUT_BFD looking for erratum 843419
4278 signatures, for each signature found a stub_entry is created
4279 describing the location of the erratum for subsequent fixup.
4280
4281 Return TRUE on successful scan, FALSE on failure to scan.
4282 */
4283
4284 static bool
4285 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
4286 struct bfd_link_info *info)
4287 {
4288 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4289
4290 if (htab == NULL)
4291 return true;
4292
4293 if (elf_section_type (section) != SHT_PROGBITS
4294 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4295 || (section->flags & SEC_EXCLUDE) != 0
4296 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4297 || (section->output_section == bfd_abs_section_ptr))
4298 return true;
4299
4300 do
4301 {
4302 bfd_byte *contents = NULL;
4303 struct _aarch64_elf_section_data *sec_data;
4304 unsigned int span;
4305
4306 if (elf_section_data (section)->this_hdr.contents != NULL)
4307 contents = elf_section_data (section)->this_hdr.contents;
4308 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4309 return false;
4310
4311 sec_data = elf_aarch64_section_data (section);
4312
4313 if (sec_data->mapcount)
4314 qsort (sec_data->map, sec_data->mapcount,
4315 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4316
4317 for (span = 0; span < sec_data->mapcount; span++)
4318 {
4319 unsigned int span_start = sec_data->map[span].vma;
4320 unsigned int span_end = ((span == sec_data->mapcount - 1)
4321 ? sec_data->map[0].vma + section->size
4322 : sec_data->map[span + 1].vma);
4323 unsigned int i;
4324 char span_type = sec_data->map[span].type;
4325
4326 if (span_type == 'd')
4327 continue;
4328
4329 for (i = span_start; i + 8 < span_end; i += 4)
4330 {
4331 bfd_vma vma = (section->output_section->vma
4332 + section->output_offset
4333 + i);
4334 bfd_vma veneer_i;
4335
4336 if (_bfd_aarch64_erratum_843419_p
4337 (contents, vma, i, span_end, &veneer_i))
4338 {
4339 uint32_t insn = bfd_getl32 (contents + veneer_i);
4340
4341 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
4342 section, info))
4343 return false;
4344 }
4345 }
4346 }
4347
4348 if (elf_section_data (section)->this_hdr.contents == NULL)
4349 free (contents);
4350 }
4351 while (0);
4352
4353 return true;
4354 }
4355
4356
4357 /* Add stub entries for calls.
4358
4359 The basic idea here is to examine all the relocations looking for
4360 PC-relative calls to a target that is unreachable with a "bl"
4361 instruction. */
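/* For example, "bl" and "b" have a 26-bit, 4-byte-scaled immediate,
   giving a reach of roughly +/-128MB; a call whose destination lies
   further away than that from the call site must be routed through a
   stub that is itself within range of the call.  */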
4362
4363 static bool
4364 _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
4365 struct bfd_link_info *info)
4366 {
4367 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4368 bool need_bti = elf_aarch64_bti_p (output_bfd);
4369 bfd *input_bfd;
4370
4371 for (input_bfd = info->input_bfds; input_bfd != NULL;
4372 input_bfd = input_bfd->link.next)
4373 {
4374 Elf_Internal_Shdr *symtab_hdr;
4375 asection *section;
4376 Elf_Internal_Sym *local_syms = NULL;
4377
4378 if (!is_aarch64_elf (input_bfd)
4379 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4380 continue;
4381
4382 /* We'll need the symbol table in a second. */
4383 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4384 if (symtab_hdr->sh_info == 0)
4385 continue;
4386
4387 /* Walk over each section attached to the input bfd. */
4388 for (section = input_bfd->sections;
4389 section != NULL; section = section->next)
4390 {
4391 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4392
4393 /* If there aren't any relocs, then there's nothing more to do. */
4394 if ((section->flags & SEC_RELOC) == 0
4395 || section->reloc_count == 0
4396 || (section->flags & SEC_CODE) == 0)
4397 continue;
4398
4399 /* If this section is a link-once section that will be
4400 discarded, then don't create any stubs. */
4401 if (section->output_section == NULL
4402 || section->output_section->owner != output_bfd)
4403 continue;
4404
4405 /* Get the relocs. */
4406 internal_relocs
4407 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4408 NULL, info->keep_memory);
4409 if (internal_relocs == NULL)
4410 goto error_ret_free_local;
4411
4412 /* Now examine each relocation. */
4413 irela = internal_relocs;
4414 irelaend = irela + section->reloc_count;
4415 for (; irela < irelaend; irela++)
4416 {
4417 unsigned int r_type, r_indx;
4418 enum elf_aarch64_stub_type stub_type;
4419 struct elf_aarch64_stub_hash_entry *stub_entry;
4420 struct elf_aarch64_stub_hash_entry *stub_entry_bti;
4421 asection *sym_sec;
4422 bfd_vma sym_value;
4423 bfd_vma destination;
4424 struct elf_aarch64_link_hash_entry *hash;
4425 const char *sym_name;
4426 char *stub_name;
4427 char *stub_name_bti;
4428 const asection *id_sec;
4429 const asection *id_sec_bti;
4430 unsigned char st_type;
4431 bfd_size_type len;
4432
4433 r_type = ELFNN_R_TYPE (irela->r_info);
4434 r_indx = ELFNN_R_SYM (irela->r_info);
4435
4436 if (r_type >= (unsigned int) R_AARCH64_end)
4437 {
4438 bfd_set_error (bfd_error_bad_value);
4439 error_ret_free_internal:
4440 if (elf_section_data (section)->relocs == NULL)
4441 free (internal_relocs);
4442 goto error_ret_free_local;
4443 }
4444
4445 /* Only look for stubs on unconditional branch and
4446 branch and link instructions. */
4447 if (r_type != (unsigned int) AARCH64_R (CALL26)
4448 && r_type != (unsigned int) AARCH64_R (JUMP26))
4449 continue;
4450
4451 /* Now determine the call target, its name, value,
4452 section. */
4453 sym_sec = NULL;
4454 sym_value = 0;
4455 destination = 0;
4456 hash = NULL;
4457 sym_name = NULL;
4458 if (r_indx < symtab_hdr->sh_info)
4459 {
4460 /* It's a local symbol. */
4461 Elf_Internal_Sym *sym;
4462 Elf_Internal_Shdr *hdr;
4463
4464 if (local_syms == NULL)
4465 {
4466 local_syms
4467 = (Elf_Internal_Sym *) symtab_hdr->contents;
4468 if (local_syms == NULL)
4469 local_syms
4470 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4471 symtab_hdr->sh_info, 0,
4472 NULL, NULL, NULL);
4473 if (local_syms == NULL)
4474 goto error_ret_free_internal;
4475 }
4476
4477 sym = local_syms + r_indx;
4478 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4479 sym_sec = hdr->bfd_section;
4480 if (!sym_sec)
4481 /* This is an undefined symbol. It can never
4482 be resolved. */
4483 continue;
4484
4485 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4486 sym_value = sym->st_value;
4487 destination = (sym_value + irela->r_addend
4488 + sym_sec->output_offset
4489 + sym_sec->output_section->vma);
4490 st_type = ELF_ST_TYPE (sym->st_info);
4491 sym_name
4492 = bfd_elf_string_from_elf_section (input_bfd,
4493 symtab_hdr->sh_link,
4494 sym->st_name);
4495 }
4496 else
4497 {
4498 int e_indx;
4499
4500 e_indx = r_indx - symtab_hdr->sh_info;
4501 hash = ((struct elf_aarch64_link_hash_entry *)
4502 elf_sym_hashes (input_bfd)[e_indx]);
4503
4504 while (hash->root.root.type == bfd_link_hash_indirect
4505 || hash->root.root.type == bfd_link_hash_warning)
4506 hash = ((struct elf_aarch64_link_hash_entry *)
4507 hash->root.root.u.i.link);
4508
4509 if (hash->root.root.type == bfd_link_hash_defined
4510 || hash->root.root.type == bfd_link_hash_defweak)
4511 {
4512 struct elf_aarch64_link_hash_table *globals =
4513 elf_aarch64_hash_table (info);
4514 sym_sec = hash->root.root.u.def.section;
4515 sym_value = hash->root.root.u.def.value;
4516 /* For a destination in a shared library,
4517 use the PLT stub as target address to
4518 decide whether a branch stub is
4519 needed. */
4520 if (globals->root.splt != NULL && hash != NULL
4521 && hash->root.plt.offset != (bfd_vma) - 1)
4522 {
4523 sym_sec = globals->root.splt;
4524 sym_value = hash->root.plt.offset;
4525 if (sym_sec->output_section != NULL)
4526 destination = (sym_value
4527 + sym_sec->output_offset
4528 + sym_sec->output_section->vma);
4529 }
4530 else if (sym_sec->output_section != NULL)
4531 destination = (sym_value + irela->r_addend
4532 + sym_sec->output_offset
4533 + sym_sec->output_section->vma);
4534 }
4535 else if (hash->root.root.type == bfd_link_hash_undefined
4536 || (hash->root.root.type
4537 == bfd_link_hash_undefweak))
4538 {
4539 /* For a shared library, use the PLT stub as
4540 target address to decide whether a long
4541 branch stub is needed.
4542 For absolute code, such undefined symbols cannot be handled. */
4543 struct elf_aarch64_link_hash_table *globals =
4544 elf_aarch64_hash_table (info);
4545
4546 if (globals->root.splt != NULL && hash != NULL
4547 && hash->root.plt.offset != (bfd_vma) - 1)
4548 {
4549 sym_sec = globals->root.splt;
4550 sym_value = hash->root.plt.offset;
4551 if (sym_sec->output_section != NULL)
4552 destination = (sym_value
4553 + sym_sec->output_offset
4554 + sym_sec->output_section->vma);
4555 }
4556 else
4557 continue;
4558 }
4559 else
4560 {
4561 bfd_set_error (bfd_error_bad_value);
4562 goto error_ret_free_internal;
4563 }
4564 st_type = ELF_ST_TYPE (hash->root.type);
4565 sym_name = hash->root.root.root.string;
4566 }
4567
4568 /* Determine what (if any) linker stub is needed. */
4569 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4570 st_type, destination);
4571 if (stub_type == aarch64_stub_none)
4572 continue;
4573
4574 /* Support for grouping stub sections. */
4575 id_sec = htab->stub_group[section->id].link_sec;
4576
4577 /* Get the name of this stub. */
4578 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4579 irela);
4580 if (!stub_name)
4581 goto error_ret_free_internal;
4582
4583 stub_entry =
4584 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4585 stub_name, false, false);
4586 if (stub_entry != NULL)
4587 {
4588 /* The proper stub has already been created. */
4589 free (stub_name);
4590
4591 /* Always update this stub's target since it may have
4592 changed after layout. */
4593 stub_entry->target_value = sym_value + irela->r_addend;
4594
4595 if (stub_entry->double_stub)
4596 {
4597 /* Update the target of both stubs. */
4598
4599 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4600 stub_name_bti =
4601 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
4602 irela);
4603 if (!stub_name_bti)
4604 goto error_ret_free_internal;
4605 stub_entry_bti =
4606 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4607 stub_name_bti, false, false);
4608 BFD_ASSERT (stub_entry_bti != NULL);
4609 free (stub_name_bti);
4610 stub_entry_bti->target_value = stub_entry->target_value;
4611 stub_entry->target_value = stub_entry_bti->stub_offset;
4612 }
4613 continue;
4614 }
4615
4616 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4617 (stub_name, section, htab);
4618 if (stub_entry == NULL)
4619 {
4620 free (stub_name);
4621 goto error_ret_free_internal;
4622 }
4623
4624 stub_entry->target_value = sym_value + irela->r_addend;
4625 stub_entry->target_section = sym_sec;
4626 stub_entry->stub_type = stub_type;
4627 stub_entry->h = hash;
4628 stub_entry->st_type = st_type;
4629
4630 if (sym_name == NULL)
4631 sym_name = "unnamed";
4632 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4633 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4634 if (stub_entry->output_name == NULL)
4635 {
4636 free (stub_name);
4637 goto error_ret_free_internal;
4638 }
4639
4640 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4641 sym_name);
4642
4643 /* A stub with an indirect jump may break BTI compatibility, so
4644 insert another stub with a direct jump near the target. */
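/* The resulting (illustrative) layout: the original far-branch stub
   keeps its indirect branch but is redirected at the BTI stub, which
   lives in the stub section of the target's own group and consists of
   a BTI landing pad followed by a direct branch to the real destination
   (see aarch64_bti_direct_branch_stub).  The exact offset of the BTI
   stub within its section is filled in on a later pass via the
   double_stub handling above.  */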
4645 if (need_bti && !aarch64_bti_stub_p (input_bfd, stub_entry))
4646 {
4647 stub_entry->double_stub = true;
4648 htab->has_double_stub = true;
4649 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4650 stub_name_bti =
4651 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
4652 if (!stub_name_bti)
4653 {
4654 free (stub_name);
4655 goto error_ret_free_internal;
4656 }
4657
4658 stub_entry_bti =
4659 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4660 stub_name_bti, false, false);
4661 if (stub_entry_bti == NULL)
4662 stub_entry_bti =
4663 _bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
4664 sym_sec, htab);
4665 if (stub_entry_bti == NULL)
4666 {
4667 free (stub_name);
4668 free (stub_name_bti);
4669 goto error_ret_free_internal;
4670 }
4671
4672 stub_entry_bti->target_value = sym_value + irela->r_addend;
4673 stub_entry_bti->target_section = sym_sec;
4674 stub_entry_bti->stub_type = aarch64_stub_bti_direct_branch;
4675 stub_entry_bti->h = hash;
4676 stub_entry_bti->st_type = st_type;
4677
4678 len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
4679 stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd, len);
4680 if (stub_entry_bti->output_name == NULL)
4681 {
4682 free (stub_name);
4683 free (stub_name_bti);
4684 goto error_ret_free_internal;
4685 }
4686 snprintf (stub_entry_bti->output_name, len,
4687 BTI_STUB_ENTRY_NAME, sym_name);
4688
4689 /* Update the indirect call stub to target the BTI stub. */
4690 stub_entry->target_value = 0;
4691 stub_entry->target_section = stub_entry_bti->stub_sec;
4692 stub_entry->stub_type = stub_type;
4693 stub_entry->h = NULL;
4694 stub_entry->st_type = STT_FUNC;
4695 }
4696
4697 *stub_changed = true;
4698 }
4699
4700 /* We're done with the internal relocs, free them. */
4701 if (elf_section_data (section)->relocs == NULL)
4702 free (internal_relocs);
4703 }
4704 }
4705 return true;
4706 error_ret_free_local:
4707 return false;
4708 }
4709
4710
4711 /* Determine and set the size of the stub section for a final link. */
4712
4713 bool
4714 elfNN_aarch64_size_stubs (bfd *output_bfd,
4715 bfd *stub_bfd,
4716 struct bfd_link_info *info,
4717 bfd_signed_vma group_size,
4718 asection * (*add_stub_section) (const char *,
4719 asection *),
4720 void (*layout_sections_again) (void))
4721 {
4722 bfd_size_type stub_group_size;
4723 bool stubs_always_before_branch;
4724 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4725 unsigned int num_erratum_835769_fixes = 0;
4726
4727 /* Propagate mach to stub bfd, because it may not have been
4728 finalized when we created stub_bfd. */
4729 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4730 bfd_get_mach (output_bfd));
4731
4732 /* Stash our params away. */
4733 htab->stub_bfd = stub_bfd;
4734 htab->add_stub_section = add_stub_section;
4735 htab->layout_sections_again = layout_sections_again;
4736 stubs_always_before_branch = group_size < 0;
4737 if (group_size < 0)
4738 stub_group_size = -group_size;
4739 else
4740 stub_group_size = group_size;
4741
4742 if (stub_group_size == 1)
4743 {
4744 /* Default values. */
4745 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
4746 stub_group_size = 127 * 1024 * 1024;
4747 }
4748
4749 group_sections (htab, stub_group_size, stubs_always_before_branch);
4750
4751 (*htab->layout_sections_again) ();
4752
4753 if (htab->fix_erratum_835769)
4754 {
4755 bfd *input_bfd;
4756
4757 for (input_bfd = info->input_bfds;
4758 input_bfd != NULL; input_bfd = input_bfd->link.next)
4759 {
4760 if (!is_aarch64_elf (input_bfd)
4761 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4762 continue;
4763
4764 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
4765 &num_erratum_835769_fixes))
4766 return false;
4767 }
4768
4769 _bfd_aarch64_resize_stubs (htab);
4770 (*htab->layout_sections_again) ();
4771 }
4772
4773 if (htab->fix_erratum_843419 != ERRAT_NONE)
4774 {
4775 bfd *input_bfd;
4776
4777 for (input_bfd = info->input_bfds;
4778 input_bfd != NULL;
4779 input_bfd = input_bfd->link.next)
4780 {
4781 asection *section;
4782
4783 if (!is_aarch64_elf (input_bfd)
4784 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4785 continue;
4786
4787 for (section = input_bfd->sections;
4788 section != NULL;
4789 section = section->next)
4790 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
4791 return false;
4792 }
4793
4794 _bfd_aarch64_resize_stubs (htab);
4795 (*htab->layout_sections_again) ();
4796 }
4797
4798 for (;;)
4799 {
4800 bool stub_changed = false;
4801
4802 if (!_bfd_aarch64_add_call_stub_entries (&stub_changed, output_bfd, info))
4803 return false;
4804
4805 if (!stub_changed)
4806 return true;
4807
4808 _bfd_aarch64_resize_stubs (htab);
4809 (*htab->layout_sections_again) ();
4810 }
4811 }
4812
4813 /* Build all the stubs associated with the current output file. The
4814 stubs are kept in a hash table attached to the main linker hash
4815 table. We also set up the .plt entries for statically linked PIC
4816 functions here. This function is called via aarch64_elf_finish in the
4817 linker. */
4818
4819 bool
4820 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4821 {
4822 asection *stub_sec;
4823 struct bfd_hash_table *table;
4824 struct elf_aarch64_link_hash_table *htab;
4825
4826 htab = elf_aarch64_hash_table (info);
4827
4828 for (stub_sec = htab->stub_bfd->sections;
4829 stub_sec != NULL; stub_sec = stub_sec->next)
4830 {
4831 bfd_size_type size;
4832
4833 /* Ignore non-stub sections. */
4834 if (!strstr (stub_sec->name, STUB_SUFFIX))
4835 continue;
4836
4837 /* Allocate memory to hold the linker stubs. */
4838 size = stub_sec->size;
4839 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4840 if (stub_sec->contents == NULL && size != 0)
4841 return false;
4842 stub_sec->size = 0;
4843
4844 /* Add a branch around the stub section, and a nop, to keep it 8 byte
4845 aligned, as long branch stubs contain a 64-bit address. */
4846 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4847 bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
4848 stub_sec->size += 8;
4849 }
4850
4851 /* Build the stubs as directed by the stub hash table. */
4852 table = &htab->stub_hash_table;
4853 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4854
4855 return true;
4856 }
4857
4858
4859 /* Add an entry to the code/data map for section SEC. */
4860
4861 static void
4862 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4863 {
4864 struct _aarch64_elf_section_data *sec_data =
4865 elf_aarch64_section_data (sec);
4866 unsigned int newidx;
4867
4868 if (sec_data->map == NULL)
4869 {
4870 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4871 sec_data->mapcount = 0;
4872 sec_data->mapsize = 1;
4873 }
4874
4875 newidx = sec_data->mapcount++;
4876
4877 if (sec_data->mapcount > sec_data->mapsize)
4878 {
4879 sec_data->mapsize *= 2;
4880 sec_data->map = bfd_realloc_or_free
4881 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4882 }
4883
4884 if (sec_data->map)
4885 {
4886 sec_data->map[newidx].vma = vma;
4887 sec_data->map[newidx].type = type;
4888 }
4889 }
4890
4891
4892 /* Initialise maps of insn/data for input BFDs. */
4893 void
4894 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4895 {
4896 Elf_Internal_Sym *isymbuf;
4897 Elf_Internal_Shdr *hdr;
4898 unsigned int i, localsyms;
4899
4900 /* Make sure that we are dealing with an AArch64 elf binary. */
4901 if (!is_aarch64_elf (abfd))
4902 return;
4903
4904 if ((abfd->flags & DYNAMIC) != 0)
4905 return;
4906
4907 hdr = &elf_symtab_hdr (abfd);
4908 localsyms = hdr->sh_info;
4909
4910 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4911 should contain the number of local symbols, which should come before any
4912 global symbols. Mapping symbols are always local. */
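/* Mapping symbols follow the AArch64 ELF convention: "$x" marks the
   start of a run of A64 instructions and "$d" the start of a run of
   data.  Only the character after the '$' is recorded as the span
   type, and '$d' spans are skipped by the erratum scanners above.  */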
4913 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4914
4915 /* No internal symbols read? Skip this BFD. */
4916 if (isymbuf == NULL)
4917 return;
4918
4919 for (i = 0; i < localsyms; i++)
4920 {
4921 Elf_Internal_Sym *isym = &isymbuf[i];
4922 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4923 const char *name;
4924
4925 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4926 {
4927 name = bfd_elf_string_from_elf_section (abfd,
4928 hdr->sh_link,
4929 isym->st_name);
4930
4931 if (bfd_is_aarch64_special_symbol_name
4932 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4933 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4934 }
4935 }
4936 }
4937
4938 static void
4939 setup_plt_values (struct bfd_link_info *link_info,
4940 aarch64_plt_type plt_type)
4941 {
4942 struct elf_aarch64_link_hash_table *globals;
4943 globals = elf_aarch64_hash_table (link_info);
4944
4945 if (plt_type == PLT_BTI_PAC)
4946 {
4947 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4948
4949 /* Only in ET_EXEC do we need PLTn with BTI. */
4950 if (bfd_link_pde (link_info))
4951 {
4952 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
4953 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
4954 }
4955 else
4956 {
4957 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4958 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4959 }
4960 }
4961 else if (plt_type == PLT_BTI)
4962 {
4963 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4964
4965 /* Only in ET_EXEC do we need PLTn with BTI. */
4966 if (bfd_link_pde (link_info))
4967 {
4968 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
4969 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
4970 }
4971 }
4972 else if (plt_type == PLT_PAC)
4973 {
4974 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4975 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4976 }
4977 }
4978
4979 /* Set option values needed during linking. */
4980 void
4981 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4982 struct bfd_link_info *link_info,
4983 int no_enum_warn,
4984 int no_wchar_warn, int pic_veneer,
4985 int fix_erratum_835769,
4986 erratum_84319_opts fix_erratum_843419,
4987 int no_apply_dynamic_relocs,
4988 aarch64_bti_pac_info bp_info)
4989 {
4990 struct elf_aarch64_link_hash_table *globals;
4991
4992 globals = elf_aarch64_hash_table (link_info);
4993 globals->pic_veneer = pic_veneer;
4994 globals->fix_erratum_835769 = fix_erratum_835769;
4995 /* If the default options are used, then ERRAT_ADR will be set by default
4996 which enables the ADRP->ADR variant of the erratum 843419
4997 workaround. */
4998 globals->fix_erratum_843419 = fix_erratum_843419;
4999 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
5000
5001 BFD_ASSERT (is_aarch64_elf (output_bfd));
5002 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
5003 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
5004
5005 switch (bp_info.bti_type)
5006 {
5007 case BTI_WARN:
5008 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
5009 elf_aarch64_tdata (output_bfd)->gnu_and_prop
5010 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
5011 break;
5012
5013 default:
5014 break;
5015 }
5016 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
5017 setup_plt_values (link_info, bp_info.plt_type);
5018 }
5019
5020 static bfd_vma
5021 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
5022 struct elf_aarch64_link_hash_table
5023 *globals, struct bfd_link_info *info,
5024 bfd_vma value, bfd *output_bfd,
5025 bool *unresolved_reloc_p)
5026 {
5027 bfd_vma off = (bfd_vma) - 1;
5028 asection *basegot = globals->root.sgot;
5029 bool dyn = globals->root.dynamic_sections_created;
5030
5031 if (h != NULL)
5032 {
5033 BFD_ASSERT (basegot != NULL);
5034 off = h->got.offset;
5035 BFD_ASSERT (off != (bfd_vma) - 1);
5036 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
5037 || (bfd_link_pic (info)
5038 && SYMBOL_REFERENCES_LOCAL (info, h))
5039 || (ELF_ST_VISIBILITY (h->other)
5040 && h->root.type == bfd_link_hash_undefweak))
5041 {
5042 /* This is actually a static link, or it is a -Bsymbolic link
5043 and the symbol is defined locally. We must initialize this
5044 entry in the global offset table. Since the offset must
5045 always be a multiple of 8 (4 in the case of ILP32), we use
5046 the least significant bit to record whether we have
5047 initialized it already.
5048 When doing a dynamic link, we create a .rel(a).got relocation
5049 entry to initialize the value. This is done in the
5050 finish_dynamic_symbol routine. */
5051 if ((off & 1) != 0)
5052 off &= ~1;
5053 else
5054 {
5055 bfd_put_NN (output_bfd, value, basegot->contents + off);
5056 h->got.offset |= 1;
5057 }
5058 }
5059 else
5060 *unresolved_reloc_p = false;
5061
5062 off = off + basegot->output_section->vma + basegot->output_offset;
5063 }
5064
5065 return off;
5066 }
5067
5068 /* Change R_TYPE to a more efficient access model where possible,
5069 return the new reloc type. */
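/* For illustration: under the mapping below, a general-dynamic access
   relaxed for a local-executable link has its page/lo12 relocations
   rewritten as TPREL move relocations (G1 then G0_NC), i.e. the
   adrp/add pair effectively becomes a movz/movk of the TP-relative
   offset; when only initial-exec relaxation is possible, the same
   relocations become GOTTPREL page/lo12 accesses instead.  The
   instruction rewriting itself is done by the TLS relaxation code
   elsewhere in this file.  */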
5070
5071 static bfd_reloc_code_real_type
5072 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
5073 struct elf_link_hash_entry *h,
5074 struct bfd_link_info *info)
5075 {
5076 bool local_exec = bfd_link_executable (info)
5077 && SYMBOL_REFERENCES_LOCAL (info, h);
5078
5079 switch (r_type)
5080 {
5081 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5082 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5083 return (local_exec
5084 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5085 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
5086
5087 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5088 return (local_exec
5089 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5090 : r_type);
5091
5092 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5093 return (local_exec
5094 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5095 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5096
5097 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5098 return (local_exec
5099 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5100 : BFD_RELOC_AARCH64_NONE);
5101
5102 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5103 return (local_exec
5104 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5105 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
5106
5107 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5108 return (local_exec
5109 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5110 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
5111
5112 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5113 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5114 return (local_exec
5115 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5116 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
5117
5118 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5119 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
5120
5121 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5122 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
5123
5124 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5125 return r_type;
5126
5127 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5128 return (local_exec
5129 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
5130 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5131
5132 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5133 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5134 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5135 /* Instructions with these relocations will become NOPs. */
5136 return BFD_RELOC_AARCH64_NONE;
5137
5138 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5139 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5140 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5141 return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;
5142
5143 #if ARCH_SIZE == 64
5144 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5145 return local_exec
5146 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5147 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
5148
5149 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5150 return local_exec
5151 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5152 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
5153 #endif
5154
5155 default:
5156 break;
5157 }
5158
5159 return r_type;
5160 }
5161
5162 static unsigned int
5163 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
5164 {
5165 switch (r_type)
5166 {
5167 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5168 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5169 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5170 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5171 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5172 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5173 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5174 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5175 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5176 return GOT_NORMAL;
5177
5178 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5179 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5180 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5181 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5182 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5183 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5184 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5185 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5186 return GOT_TLS_GD;
5187
5188 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5189 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5190 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5191 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5192 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5193 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5194 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
5195 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5196 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5197 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5198 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5199 return GOT_TLSDESC_GD;
5200
5201 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5202 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5203 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5204 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5205 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5206 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5207 return GOT_TLS_IE;
5208
5209 default:
5210 break;
5211 }
5212 return GOT_UNKNOWN;
5213 }
5214
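/* Return TRUE if the TLS access described by relocation R_TYPE against
   the symbol given by H / R_SYMNDX may be relaxed.  A GD-style access to
   a symbol whose GOT type is already IE may always be relaxed (GD->IE);
   any other relaxation is only performed when linking an executable and
   the symbol is not an undefined weak.  */
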
5215 static bool
5216 aarch64_can_relax_tls (bfd *input_bfd,
5217 struct bfd_link_info *info,
5218 bfd_reloc_code_real_type r_type,
5219 struct elf_link_hash_entry *h,
5220 unsigned long r_symndx)
5221 {
5222 unsigned int symbol_got_type;
5223 unsigned int reloc_got_type;
5224
5225 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
5226 return false;
5227
5228 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
5229 reloc_got_type = aarch64_reloc_got_type (r_type);
5230
5231 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
5232 return true;
5233
5234 if (!bfd_link_executable (info))
5235 return false;
5236
5237 if (h && h->root.type == bfd_link_hash_undefweak)
5238 return false;
5239
5240 return true;
5241 }
5242
5243 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
5244 enumerator. */
5245
5246 static bfd_reloc_code_real_type
5247 aarch64_tls_transition (bfd *input_bfd,
5248 struct bfd_link_info *info,
5249 unsigned int r_type,
5250 struct elf_link_hash_entry *h,
5251 unsigned long r_symndx)
5252 {
5253 bfd_reloc_code_real_type bfd_r_type
5254 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
5255
5256 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
5257 return bfd_r_type;
5258
5259 return aarch64_tls_transition_without_check (bfd_r_type, h, info);
5260 }
5261
5262 /* Return the base VMA address which should be subtracted from real addresses
5263 when resolving R_AARCH64_TLS_DTPREL relocation. */
5264
5265 static bfd_vma
5266 dtpoff_base (struct bfd_link_info *info)
5267 {
5268 /* If tls_sec is NULL, we should have signalled an error already. */
5269 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
5270 return elf_hash_table (info)->tls_sec->vma;
5271 }
5272
5273 /* Return the base VMA address which should be subtracted from real addresses
5274 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
5275
5276 static bfd_vma
5277 tpoff_base (struct bfd_link_info *info)
5278 {
5279 struct elf_link_hash_table *htab = elf_hash_table (info);
5280
5281 /* If tls_sec is NULL, we should have signalled an error already. */
5282 BFD_ASSERT (htab->tls_sec != NULL);
5283
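  /* AArch64 uses TLS variant I: the thread pointer addresses the TCB and
     the static TLS blocks follow it, so a symbol's TP-relative offset is
     its offset from tls_sec->vma plus the TCB size rounded up to the TLS
     section alignment.  */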
5284 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
5285 htab->tls_sec->alignment_power);
5286 return htab->tls_sec->vma - base;
5287 }
5288
5289 static bfd_vma *
5290 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5291 unsigned long r_symndx)
5292 {
5293 /* Return a pointer to the stored GOT offset for the symbol
5294 referred to by H, or for the local symbol R_SYMNDX. */
5295 if (h != NULL)
5296 return &h->got.offset;
5297 else
5298 {
5299 /* local symbol */
5300 struct elf_aarch64_local_symbol *l;
5301
5302 l = elf_aarch64_locals (input_bfd);
5303 return &l[r_symndx].got_offset;
5304 }
5305 }
5306
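/* The GOT offsets recorded by the _ref helpers above and below are always
   multiples of GOT_ENTRY_SIZE, so the least significant bit is free to be
   used as a flag.  The _mark/_mark_p helpers set and test that bit to
   remember that the entry has already been processed, and the plain
   _offset accessors strip it again before the offset is used.  */
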
5307 static void
5308 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5309 unsigned long r_symndx)
5310 {
5311 bfd_vma *p;
5312 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
5313 *p |= 1;
5314 }
5315
5316 static int
5317 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
5318 unsigned long r_symndx)
5319 {
5320 bfd_vma value;
5321 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5322 return value & 1;
5323 }
5324
5325 static bfd_vma
5326 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5327 unsigned long r_symndx)
5328 {
5329 bfd_vma value;
5330 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5331 value &= ~1;
5332 return value;
5333 }
5334
5335 static bfd_vma *
5336 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5337 unsigned long r_symndx)
5338 {
5339 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
5340 referred to by H, or for the local symbol R_SYMNDX. */
5341 if (h != NULL)
5342 {
5343 struct elf_aarch64_link_hash_entry *eh;
5344 eh = (struct elf_aarch64_link_hash_entry *) h;
5345 return &eh->tlsdesc_got_jump_table_offset;
5346 }
5347 else
5348 {
5349 /* local symbol */
5350 struct elf_aarch64_local_symbol *l;
5351
5352 l = elf_aarch64_locals (input_bfd);
5353 return &l[r_symndx].tlsdesc_got_jump_table_offset;
5354 }
5355 }
5356
5357 static void
5358 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5359 unsigned long r_symndx)
5360 {
5361 bfd_vma *p;
5362 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5363 *p |= 1;
5364 }
5365
5366 static int
5367 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
5368 struct elf_link_hash_entry *h,
5369 unsigned long r_symndx)
5370 {
5371 bfd_vma value;
5372 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5373 return value & 1;
5374 }
5375
5376 static bfd_vma
5377 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5378 unsigned long r_symndx)
5379 {
5380 bfd_vma value;
5381 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5382 value &= ~1;
5383 return value;
5384 }
5385
5386 /* Data for make_branch_to_erratum_835769_stub(). */
5387
5388 struct erratum_835769_branch_to_stub_data
5389 {
5390 struct bfd_link_info *info;
5391 asection *output_section;
5392 bfd_byte *contents;
5393 };
5394
5395 /* Helper to insert branches to erratum 835769 stubs in the right
5396 places for a particular section. */
5397
5398 static bool
5399 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
5400 void *in_arg)
5401 {
5402 struct elf_aarch64_stub_hash_entry *stub_entry;
5403 struct erratum_835769_branch_to_stub_data *data;
5404 bfd_byte *contents;
5405 unsigned long branch_insn = 0;
5406 bfd_vma veneered_insn_loc, veneer_entry_loc;
5407 bfd_signed_vma branch_offset;
5408 unsigned int target;
5409 bfd *abfd;
5410
5411 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5412 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
5413
5414 if (stub_entry->target_section != data->output_section
5415 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
5416 return true;
5417
5418 contents = data->contents;
5419 veneered_insn_loc = stub_entry->target_section->output_section->vma
5420 + stub_entry->target_section->output_offset
5421 + stub_entry->target_value;
5422 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5423 + stub_entry->stub_sec->output_offset
5424 + stub_entry->stub_offset;
5425 branch_offset = veneer_entry_loc - veneered_insn_loc;
5426
5427 abfd = stub_entry->target_section->owner;
5428 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5429 _bfd_error_handler
5430 (_("%pB: error: erratum 835769 stub out "
5431 "of range (input file too large)"), abfd);
5432
5433 target = stub_entry->target_value;
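  /* Overwrite the veneered instruction with an unconditional branch to the
     stub: opcode 0x14000000 (B), with the branch offset in words placed in
     the imm26 field.  */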
5434 branch_insn = 0x14000000;
5435 branch_offset >>= 2;
5436 branch_offset &= 0x3ffffff;
5437 branch_insn |= branch_offset;
5438 bfd_putl32 (branch_insn, &contents[target]);
5439
5440 return true;
5441 }
5442
5443
5444 static bool
5445 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
5446 void *in_arg)
5447 {
5448 struct elf_aarch64_stub_hash_entry *stub_entry
5449 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5450 struct erratum_835769_branch_to_stub_data *data
5451 = (struct erratum_835769_branch_to_stub_data *) in_arg;
5452 struct bfd_link_info *info;
5453 struct elf_aarch64_link_hash_table *htab;
5454 bfd_byte *contents;
5455 asection *section;
5456 bfd *abfd;
5457 bfd_vma place;
5458 uint32_t insn;
5459
5460 info = data->info;
5461 contents = data->contents;
5462 section = data->output_section;
5463
5464 htab = elf_aarch64_hash_table (info);
5465
5466 if (stub_entry->target_section != section
5467 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
5468 return true;
5469
5470 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
5471 || (htab->fix_erratum_843419 & ERRAT_ADR));
5472
5473 /* Only update the stub section if we have one. We should always have one if
5474 we're allowed to use the ADRP erratum workaround; otherwise one is not
5475 required. */
5476 if (stub_entry->stub_sec)
5477 {
5478 insn = bfd_getl32 (contents + stub_entry->target_value);
5479 bfd_putl32 (insn,
5480 stub_entry->stub_sec->contents + stub_entry->stub_offset);
5481 }
5482
5483 place = (section->output_section->vma + section->output_offset
5484 + stub_entry->adrp_offset);
5485 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
5486
5487 if (!_bfd_aarch64_adrp_p (insn))
5488 abort ();
5489
5490 bfd_signed_vma imm =
5491 (_bfd_aarch64_sign_extend
5492 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
5493 - (place & 0xfff));
5494
5495 if ((htab->fix_erratum_843419 & ERRAT_ADR)
5496 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
5497 {
5498 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
5499 | AARCH64_RT (insn));
5500 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
5501 /* Stub is not needed, don't map it out. */
5502 stub_entry->stub_type = aarch64_stub_none;
5503 }
5504 else if (htab->fix_erratum_843419 & ERRAT_ADRP)
5505 {
5506 bfd_vma veneered_insn_loc;
5507 bfd_vma veneer_entry_loc;
5508 bfd_signed_vma branch_offset;
5509 uint32_t branch_insn;
5510
5511 veneered_insn_loc = stub_entry->target_section->output_section->vma
5512 + stub_entry->target_section->output_offset
5513 + stub_entry->target_value;
5514 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5515 + stub_entry->stub_sec->output_offset
5516 + stub_entry->stub_offset;
5517 branch_offset = veneer_entry_loc - veneered_insn_loc;
5518
5519 abfd = stub_entry->target_section->owner;
5520 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5521 _bfd_error_handler
5522 (_("%pB: error: erratum 843419 stub out "
5523 "of range (input file too large)"), abfd);
5524
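      /* As for erratum 835769: overwrite the veneered instruction with an
	 unconditional B to the stub, the word offset going in imm26.  */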
5525 branch_insn = 0x14000000;
5526 branch_offset >>= 2;
5527 branch_offset &= 0x3ffffff;
5528 branch_insn |= branch_offset;
5529 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
5530 }
5531 else
5532 {
5533 abfd = stub_entry->target_section->owner;
5534 _bfd_error_handler
5535 (_("%pB: error: erratum 843419 immediate 0x%" PRIx64
5536 " out of range for ADR (input file too large) and "
5537 "--fix-cortex-a53-843419=adr used. Run the linker with "
5538 "--fix-cortex-a53-843419=full instead"),
5539 abfd, (uint64_t) (bfd_vma) imm);
5540 bfd_set_error (bfd_error_bad_value);
5541 /* This function is called inside a hashtable traversal and the error
5542 handlers called above turn into non-fatal errors.  That would mean
5543 ld returns an exit code of 0 and also produces a broken object file.
5544 To prevent this, issue a hard abort. */
5545 BFD_FAIL ();
5546 }
5547 return true;
5548 }
5549
5550
5551 static bool
5552 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
5553 struct bfd_link_info *link_info,
5554 asection *sec,
5555 bfd_byte *contents)
5556
5557 {
5558 struct elf_aarch64_link_hash_table *globals =
5559 elf_aarch64_hash_table (link_info);
5560
5561 if (globals == NULL)
5562 return false;
5563
5564 /* Fix code to point to erratum 835769 stubs. */
5565 if (globals->fix_erratum_835769)
5566 {
5567 struct erratum_835769_branch_to_stub_data data;
5568
5569 data.info = link_info;
5570 data.output_section = sec;
5571 data.contents = contents;
5572 bfd_hash_traverse (&globals->stub_hash_table,
5573 make_branch_to_erratum_835769_stub, &data);
5574 }
5575
5576 if (globals->fix_erratum_843419)
5577 {
5578 struct erratum_835769_branch_to_stub_data data;
5579
5580 data.info = link_info;
5581 data.output_section = sec;
5582 data.contents = contents;
5583 bfd_hash_traverse (&globals->stub_hash_table,
5584 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
5585 }
5586
5587 return false;
5588 }
5589
5590 /* Return TRUE if RELOC is a relocation against the base of the GOT table. */
5591
5592 static bool
5593 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
5594 {
5595 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
5596 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5597 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
5598 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
5599 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
5600 }
5601
5602 /* Perform a relocation as part of a final link. The input relocation type
5603 should be TLS relaxed. */
5604
5605 static bfd_reloc_status_type
5606 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
5607 bfd *input_bfd,
5608 bfd *output_bfd,
5609 asection *input_section,
5610 bfd_byte *contents,
5611 Elf_Internal_Rela *rel,
5612 bfd_vma value,
5613 struct bfd_link_info *info,
5614 asection *sym_sec,
5615 struct elf_link_hash_entry *h,
5616 bool *unresolved_reloc_p,
5617 bool save_addend,
5618 bfd_vma *saved_addend,
5619 Elf_Internal_Sym *sym)
5620 {
5621 Elf_Internal_Shdr *symtab_hdr;
5622 unsigned int r_type = howto->type;
5623 bfd_reloc_code_real_type bfd_r_type
5624 = elfNN_aarch64_bfd_reloc_from_howto (howto);
5625 unsigned long r_symndx;
5626 bfd_byte *hit_data = contents + rel->r_offset;
5627 bfd_vma place, off, got_entry_addr = 0;
5628 bfd_signed_vma signed_addend;
5629 struct elf_aarch64_link_hash_table *globals;
5630 bool weak_undef_p;
5631 bool relative_reloc;
5632 asection *base_got;
5633 bfd_vma orig_value = value;
5634 bool resolved_to_zero;
5635 bool abs_symbol_p;
5636
5637 globals = elf_aarch64_hash_table (info);
5638
5639 symtab_hdr = &elf_symtab_hdr (input_bfd);
5640
5641 BFD_ASSERT (is_aarch64_elf (input_bfd));
5642
5643 r_symndx = ELFNN_R_SYM (rel->r_info);
5644
5645 place = input_section->output_section->vma
5646 + input_section->output_offset + rel->r_offset;
5647
5648 /* Get addend, accumulating the addend for consecutive relocs
5649 which refer to the same offset. */
5650 signed_addend = saved_addend ? *saved_addend : 0;
5651 signed_addend += rel->r_addend;
5652
5653 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
5654 : bfd_is_und_section (sym_sec));
5655 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
5656
5657
5658 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
5659 it here if it is defined in a non-shared object. */
5660 if (h != NULL
5661 && h->type == STT_GNU_IFUNC
5662 && h->def_regular)
5663 {
5664 asection *plt;
5665 const char *name;
5666 bfd_vma addend = 0;
5667
5668 if ((input_section->flags & SEC_ALLOC) == 0)
5669 {
5670 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
5671 STT_GNU_IFUNC symbol as STT_FUNC. */
5672 if (elf_section_type (input_section) == SHT_NOTE)
5673 goto skip_ifunc;
5674
5675 /* Dynamic relocs are not propagated for SEC_DEBUGGING
5676 sections because such sections are not SEC_ALLOC and
5677 thus ld.so will not process them. */
5678 if ((input_section->flags & SEC_DEBUGGING) != 0)
5679 return bfd_reloc_ok;
5680
5681 if (h->root.root.string)
5682 name = h->root.root.string;
5683 else
5684 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
5685 _bfd_error_handler
5686 /* xgettext:c-format */
5687 (_("%pB(%pA+%#" PRIx64 "): "
5688 "unresolvable %s relocation against symbol `%s'"),
5689 input_bfd, input_section, (uint64_t) rel->r_offset,
5690 howto->name, name);
5691 bfd_set_error (bfd_error_bad_value);
5692 return bfd_reloc_notsupported;
5693 }
5694 else if (h->plt.offset == (bfd_vma) -1)
5695 goto bad_ifunc_reloc;
5696
5697 /* STT_GNU_IFUNC symbol must go through PLT. */
5698 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
5699 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
5700
5701 switch (bfd_r_type)
5702 {
5703 default:
5704 bad_ifunc_reloc:
5705 if (h->root.root.string)
5706 name = h->root.root.string;
5707 else
5708 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
5709 NULL);
5710 _bfd_error_handler
5711 /* xgettext:c-format */
5712 (_("%pB: relocation %s against STT_GNU_IFUNC "
5713 "symbol `%s' isn't handled by %s"), input_bfd,
5714 howto->name, name, __func__);
5715 bfd_set_error (bfd_error_bad_value);
5716 return bfd_reloc_notsupported;
5717
5718 case BFD_RELOC_AARCH64_NN:
5719 if (rel->r_addend != 0)
5720 {
5721 if (h->root.root.string)
5722 name = h->root.root.string;
5723 else
5724 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5725 sym, NULL);
5726 _bfd_error_handler
5727 /* xgettext:c-format */
5728 (_("%pB: relocation %s against STT_GNU_IFUNC "
5729 "symbol `%s' has non-zero addend: %" PRId64),
5730 input_bfd, howto->name, name, (int64_t) rel->r_addend);
5731 bfd_set_error (bfd_error_bad_value);
5732 return bfd_reloc_notsupported;
5733 }
5734
5735 /* Generate dynamic relocation only when there is a
5736 non-GOT reference in a shared object. */
5737 if (bfd_link_pic (info) && h->non_got_ref)
5738 {
5739 Elf_Internal_Rela outrel;
5740 asection *sreloc;
5741
5742 /* Need a dynamic relocation to get the real function
5743 address. */
5744 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5745 info,
5746 input_section,
5747 rel->r_offset);
5748 if (outrel.r_offset == (bfd_vma) -1
5749 || outrel.r_offset == (bfd_vma) -2)
5750 abort ();
5751
5752 outrel.r_offset += (input_section->output_section->vma
5753 + input_section->output_offset);
5754
5755 if (h->dynindx == -1
5756 || h->forced_local
5757 || bfd_link_executable (info))
5758 {
5759 /* This symbol is resolved locally. */
5760 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5761 outrel.r_addend = (h->root.u.def.value
5762 + h->root.u.def.section->output_section->vma
5763 + h->root.u.def.section->output_offset);
5764 }
5765 else
5766 {
5767 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5768 outrel.r_addend = 0;
5769 }
5770
5771 sreloc = globals->root.irelifunc;
5772 elf_append_rela (output_bfd, sreloc, &outrel);
5773
5774 /* If this reloc is against an external symbol, we
5775 do not want to fiddle with the addend. Otherwise,
5776 we need to include the symbol value so that it
5777 becomes an addend for the dynamic reloc. For an
5778 internal symbol, we have updated addend. */
5779 return bfd_reloc_ok;
5780 }
5781 /* FALLTHROUGH */
5782 case BFD_RELOC_AARCH64_CALL26:
5783 case BFD_RELOC_AARCH64_JUMP26:
5784 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5785 place, value,
5786 signed_addend,
5787 weak_undef_p);
5788 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5789 howto, value);
5790 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5791 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5792 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5793 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5794 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5795 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5796 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5797 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5798 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5799 base_got = globals->root.sgot;
5800 off = h->got.offset;
5801
5802 if (base_got == NULL)
5803 abort ();
5804
5805 if (off == (bfd_vma) -1)
5806 {
5807 bfd_vma plt_index;
5808
5809 /* We can't use h->got.offset here to save state, or
5810 even just remember the offset, as finish_dynamic_symbol
5811 would use that as offset into .got. */
5812
5813 if (globals->root.splt != NULL)
5814 {
5815 plt_index = ((h->plt.offset - globals->plt_header_size) /
5816 globals->plt_entry_size);
5817 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5818 base_got = globals->root.sgotplt;
5819 }
5820 else
5821 {
5822 plt_index = h->plt.offset / globals->plt_entry_size;
5823 off = plt_index * GOT_ENTRY_SIZE;
5824 base_got = globals->root.igotplt;
5825 }
5826
5827 if (h->dynindx == -1
5828 || h->forced_local
5829 || info->symbolic)
5830 {
5831 /* This references the local definition. We must
5832 initialize this entry in the global offset table.
5833 Since the offset must always be a multiple of 8,
5834 we use the least significant bit to record
5835 whether we have initialized it already.
5836
5837 When doing a dynamic link, we create a .rela.got
5838 relocation entry to initialize the value. This
5839 is done in the finish_dynamic_symbol routine. */
5840 if ((off & 1) != 0)
5841 off &= ~1;
5842 else
5843 {
5844 bfd_put_NN (output_bfd, value,
5845 base_got->contents + off);
5846 /* Note that this is harmless as -1 | 1 still is -1. */
5847 h->got.offset |= 1;
5848 }
5849 }
5850 value = (base_got->output_section->vma
5851 + base_got->output_offset + off);
5852 }
5853 else
5854 value = aarch64_calculate_got_entry_vma (h, globals, info,
5855 value, output_bfd,
5856 unresolved_reloc_p);
5857
5858 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5859 addend = (globals->root.sgot->output_section->vma
5860 + globals->root.sgot->output_offset);
5861
5862 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5863 place, value,
5864 addend, weak_undef_p);
5865 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5866 case BFD_RELOC_AARCH64_ADD_LO12:
5867 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5868 break;
5869 }
5870 }
5871
5872 skip_ifunc:
5873 resolved_to_zero = (h != NULL
5874 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
5875
5876 switch (bfd_r_type)
5877 {
5878 case BFD_RELOC_AARCH64_NONE:
5879 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5880 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5881 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5882 *unresolved_reloc_p = false;
5883 return bfd_reloc_ok;
5884
5885 case BFD_RELOC_AARCH64_NN:
5886
5887 /* When generating a shared object or relocatable executable, these
5888 relocations are copied into the output file to be resolved at
5889 run time. */
5890 if (((bfd_link_pic (info)
5891 || globals->root.is_relocatable_executable)
5892 && (input_section->flags & SEC_ALLOC)
5893 && (h == NULL
5894 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5895 && !resolved_to_zero)
5896 || h->root.type != bfd_link_hash_undefweak))
5897 /* Or, when creating an executable, we may need to keep relocations
5898 for symbols satisfied by a dynamic library if we manage to avoid
5899 copy relocs for the symbol. */
5900 || (ELIMINATE_COPY_RELOCS
5901 && !bfd_link_pic (info)
5902 && h != NULL
5903 && (input_section->flags & SEC_ALLOC)
5904 && h->dynindx != -1
5905 && !h->non_got_ref
5906 && ((h->def_dynamic
5907 && !h->def_regular)
5908 || h->root.type == bfd_link_hash_undefweak
5909 || h->root.type == bfd_link_hash_undefined)))
5910 {
5911 Elf_Internal_Rela outrel;
5912 bfd_byte *loc;
5913 bool skip, relocate;
5914 asection *sreloc;
5915
5916 *unresolved_reloc_p = false;
5917
5918 skip = false;
5919 relocate = false;
5920
5921 outrel.r_addend = signed_addend;
5922 outrel.r_offset =
5923 _bfd_elf_section_offset (output_bfd, info, input_section,
5924 rel->r_offset);
5925 if (outrel.r_offset == (bfd_vma) - 1)
5926 skip = true;
5927 else if (outrel.r_offset == (bfd_vma) - 2)
5928 {
5929 skip = true;
5930 relocate = true;
5931 }
5932 else if (abs_symbol_p)
5933 {
5934 /* Local absolute symbol. */
5935 skip = (h->forced_local || (h->dynindx == -1));
5936 relocate = skip;
5937 }
5938
5939 outrel.r_offset += (input_section->output_section->vma
5940 + input_section->output_offset);
5941
5942 if (skip)
5943 memset (&outrel, 0, sizeof outrel);
5944 else if (h != NULL
5945 && h->dynindx != -1
5946 && (!bfd_link_pic (info)
5947 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
5948 || !h->def_regular))
5949 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5950 else
5951 {
5952 int symbol;
5953
5954 /* On SVR4-ish systems, the dynamic loader cannot
5955 relocate the text and data segments independently,
5956 so the symbol does not matter. */
5957 symbol = 0;
5958 relocate = !globals->no_apply_dynamic_relocs;
5959 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5960 outrel.r_addend += value;
5961 }
5962
5963 sreloc = elf_section_data (input_section)->sreloc;
5964 if (sreloc == NULL || sreloc->contents == NULL)
5965 return bfd_reloc_notsupported;
5966
5967 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5968 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5969
5970 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5971 {
5972 /* Sanity check that we have previously allocated
5973 sufficient space in the relocation section for the
5974 number of relocations we actually want to emit. */
5975 abort ();
5976 }
5977
5978 /* If this reloc is against an external symbol, we do not want to
5979 fiddle with the addend. Otherwise, we need to include the symbol
5980 value so that it becomes an addend for the dynamic reloc. */
5981 if (!relocate)
5982 return bfd_reloc_ok;
5983
5984 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5985 contents, rel->r_offset, value,
5986 signed_addend);
5987 }
5988 else
5989 value += signed_addend;
5990 break;
5991
5992 case BFD_RELOC_AARCH64_CALL26:
5993 case BFD_RELOC_AARCH64_JUMP26:
5994 {
5995 asection *splt = globals->root.splt;
5996 bool via_plt_p =
5997 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5998
5999 /* A call to an undefined weak symbol is converted to a jump to
6000 the next instruction unless a PLT entry will be created.
6001 The jump to the next instruction is optimized as a NOP.
6002 Do the same for local undefined symbols. */
6003 if (weak_undef_p && ! via_plt_p)
6004 {
6005 bfd_putl32 (INSN_NOP, hit_data);
6006 return bfd_reloc_ok;
6007 }
6008
6009 /* If the call goes through a PLT entry, make sure to
6010 check distance to the right destination address. */
6011 if (via_plt_p)
6012 value = (splt->output_section->vma
6013 + splt->output_offset + h->plt.offset);
6014
6015 /* Check if a stub has to be inserted because the destination
6016 is too far away. */
6017 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
6018
6019 /* If the branch destination is directed to a PLT stub, "value" will be
6020 the final destination; otherwise we must add signed_addend, which may
6021 be non-zero, for example for a call to a local function symbol that
6022 has been turned into "sec_sym + sec_off", with sec_off kept in
6023 signed_addend. */
6024 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
6025 place))
6026 /* The target is out of reach, so redirect the branch to
6027 the local stub for this function. */
6028 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
6029 rel, globals);
6030 if (stub_entry != NULL)
6031 {
6032 value = (stub_entry->stub_offset
6033 + stub_entry->stub_sec->output_offset
6034 + stub_entry->stub_sec->output_section->vma);
6035
6036 /* We have redirected the destination to the stub entry address,
6037 so ignore any addend recorded in the original rela entry. */
6038 signed_addend = 0;
6039 }
6040 }
6041 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6042 place, value,
6043 signed_addend, weak_undef_p);
6044 *unresolved_reloc_p = false;
6045 break;
6046
6047 case BFD_RELOC_AARCH64_16_PCREL:
6048 case BFD_RELOC_AARCH64_32_PCREL:
6049 case BFD_RELOC_AARCH64_64_PCREL:
6050 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6051 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6052 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6053 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6054 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6055 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6056 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6057 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6058 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6059 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6060 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6061 if (bfd_link_pic (info)
6062 && (input_section->flags & SEC_ALLOC) != 0
6063 && (input_section->flags & SEC_READONLY) != 0
6064 && !_bfd_elf_symbol_refs_local_p (h, info, 1))
6065 {
6066 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6067
6068 _bfd_error_handler
6069 /* xgettext:c-format */
6070 (_("%pB: relocation %s against symbol `%s' which may bind "
6071 "externally can not be used when making a shared object; "
6072 "recompile with -fPIC"),
6073 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6074 h->root.root.string);
6075 bfd_set_error (bfd_error_bad_value);
6076 return bfd_reloc_notsupported;
6077 }
6078 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6079 place, value,
6080 signed_addend,
6081 weak_undef_p);
6082 break;
6083
6084 case BFD_RELOC_AARCH64_BRANCH19:
6085 case BFD_RELOC_AARCH64_TSTBR14:
6086 if (h && h->root.type == bfd_link_hash_undefined)
6087 {
6088 _bfd_error_handler
6089 /* xgettext:c-format */
6090 (_("%pB: conditional branch to undefined symbol `%s' "
6091 "not allowed"), input_bfd, h->root.root.string);
6092 bfd_set_error (bfd_error_bad_value);
6093 return bfd_reloc_notsupported;
6094 }
6095 /* Fall through. */
6096
6097 case BFD_RELOC_AARCH64_16:
6098 #if ARCH_SIZE == 64
6099 case BFD_RELOC_AARCH64_32:
6100 #endif
6101 case BFD_RELOC_AARCH64_ADD_LO12:
6102 case BFD_RELOC_AARCH64_LDST128_LO12:
6103 case BFD_RELOC_AARCH64_LDST16_LO12:
6104 case BFD_RELOC_AARCH64_LDST32_LO12:
6105 case BFD_RELOC_AARCH64_LDST64_LO12:
6106 case BFD_RELOC_AARCH64_LDST8_LO12:
6107 case BFD_RELOC_AARCH64_MOVW_G0:
6108 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6109 case BFD_RELOC_AARCH64_MOVW_G0_S:
6110 case BFD_RELOC_AARCH64_MOVW_G1:
6111 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6112 case BFD_RELOC_AARCH64_MOVW_G1_S:
6113 case BFD_RELOC_AARCH64_MOVW_G2:
6114 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6115 case BFD_RELOC_AARCH64_MOVW_G2_S:
6116 case BFD_RELOC_AARCH64_MOVW_G3:
6117 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6118 place, value,
6119 signed_addend, weak_undef_p);
6120 break;
6121
6122 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6123 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6124 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6125 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6126 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6127 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6128 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6129 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6130 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6131 if (globals->root.sgot == NULL)
6132 BFD_ASSERT (h != NULL);
6133
6134 relative_reloc = false;
6135 if (h != NULL)
6136 {
6137 bfd_vma addend = 0;
6138
6139 /* If a symbol is not dynamic and is not undefined weak, bind it
6140 locally and generate a RELATIVE relocation under PIC mode.
6141
6142 NOTE: one symbol may be referenced by several relocations, but we
6143 should only generate one RELATIVE relocation for that symbol.
6144 Therefore, check GOT offset mark first. */
6145 if (h->dynindx == -1
6146 && !h->forced_local
6147 && h->root.type != bfd_link_hash_undefweak
6148 && bfd_link_pic (info)
6149 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6150 relative_reloc = true;
6151
6152 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
6153 output_bfd,
6154 unresolved_reloc_p);
6155 /* Record the GOT entry address which will be used when generating
6156 RELATIVE relocation. */
6157 if (relative_reloc)
6158 got_entry_addr = value;
6159
6160 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6161 addend = (globals->root.sgot->output_section->vma
6162 + globals->root.sgot->output_offset);
6163 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6164 place, value,
6165 addend, weak_undef_p);
6166 }
6167 else
6168 {
6169 bfd_vma addend = 0;
6170 struct elf_aarch64_local_symbol *locals
6171 = elf_aarch64_locals (input_bfd);
6172
6173 if (locals == NULL)
6174 {
6175 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6176 _bfd_error_handler
6177 /* xgettext:c-format */
6178 (_("%pB: local symbol descriptor table is NULL when applying "
6179 "relocation %s against local symbol"),
6180 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
6181 abort ();
6182 }
6183
6184 off = symbol_got_offset (input_bfd, h, r_symndx);
6185 base_got = globals->root.sgot;
6186 got_entry_addr = (base_got->output_section->vma
6187 + base_got->output_offset + off);
6188
6189 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6190 {
6191 bfd_put_64 (output_bfd, value, base_got->contents + off);
6192
6193 /* For a local symbol, we have done the absolute relocation at
6194 static link time.  For a shared library, however, the GOT entry
6195 content must be adjusted by the shared object's runtime base
6196 address, so we generate an R_AARCH64_RELATIVE reloc for the
6197 dynamic linker. */
6198 if (bfd_link_pic (info))
6199 relative_reloc = true;
6200
6201 symbol_got_offset_mark (input_bfd, h, r_symndx);
6202 }
6203
6204 /* Update the relocation value to the GOT entry address, as we have
6205 transformed the direct data access into an indirect access through the GOT. */
6206 value = got_entry_addr;
6207
6208 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6209 addend = base_got->output_section->vma + base_got->output_offset;
6210
6211 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6212 place, value,
6213 addend, weak_undef_p);
6214 }
6215
6216 if (relative_reloc)
6217 {
6218 asection *s;
6219 Elf_Internal_Rela outrel;
6220
6221 s = globals->root.srelgot;
6222 if (s == NULL)
6223 abort ();
6224
6225 outrel.r_offset = got_entry_addr;
6226 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6227 outrel.r_addend = orig_value;
6228 elf_append_rela (output_bfd, s, &outrel);
6229 }
6230 break;
6231
6232 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6233 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6234 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6235 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6236 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6237 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6238 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6239 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6240 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6241 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6242 if (globals->root.sgot == NULL)
6243 return bfd_reloc_notsupported;
6244
6245 value = (symbol_got_offset (input_bfd, h, r_symndx)
6246 + globals->root.sgot->output_section->vma
6247 + globals->root.sgot->output_offset);
6248
6249 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6250 place, value,
6251 0, weak_undef_p);
6252 *unresolved_reloc_p = false;
6253 break;
6254
6255 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6256 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6257 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6258 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6259 if (globals->root.sgot == NULL)
6260 return bfd_reloc_notsupported;
6261
6262 value = symbol_got_offset (input_bfd, h, r_symndx);
6263 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6264 place, value,
6265 0, weak_undef_p);
6266 *unresolved_reloc_p = false;
6267 break;
6268
6269 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6270 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6271 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6272 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6273 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6274 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6275 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6276 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6277 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6278 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6279 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6280 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6281 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6282 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6283 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6284 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6285 {
6286 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6287 {
6288 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6289 _bfd_error_handler
6290 /* xgettext:c-format */
6291 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6292 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6293 h->root.root.string);
6294 bfd_set_error (bfd_error_bad_value);
6295 return bfd_reloc_notsupported;
6296 }
6297
6298 bfd_vma def_value
6299 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
6300 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6301 place, value,
6302 def_value, weak_undef_p);
6303 break;
6304 }
6305
6306 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6307 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6308 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6309 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
6310 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6311 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
6312 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6313 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
6314 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6315 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
6316 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6317 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6318 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6319 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6320 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6321 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6322 {
6323 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6324 {
6325 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6326 _bfd_error_handler
6327 /* xgettext:c-format */
6328 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6329 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6330 h->root.root.string);
6331 bfd_set_error (bfd_error_bad_value);
6332 return bfd_reloc_notsupported;
6333 }
6334
6335 bfd_vma def_value
6336 = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
6337 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6338 place, value,
6339 def_value, weak_undef_p);
6340 *unresolved_reloc_p = false;
6341 break;
6342 }
6343
6344 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6345 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6346 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6347 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6348 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
6349 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6350 if (globals->root.sgot == NULL)
6351 return bfd_reloc_notsupported;
6352 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6353 + globals->root.sgotplt->output_section->vma
6354 + globals->root.sgotplt->output_offset
6355 + globals->sgotplt_jump_table_size);
6356
6357 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6358 place, value,
6359 0, weak_undef_p);
6360 *unresolved_reloc_p = false;
6361 break;
6362
6363 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6364 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6365 if (globals->root.sgot == NULL)
6366 return bfd_reloc_notsupported;
6367
6368 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6369 + globals->root.sgotplt->output_section->vma
6370 + globals->root.sgotplt->output_offset
6371 + globals->sgotplt_jump_table_size);
6372
6373 value -= (globals->root.sgot->output_section->vma
6374 + globals->root.sgot->output_offset);
6375
6376 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6377 place, value,
6378 0, weak_undef_p);
6379 *unresolved_reloc_p = false;
6380 break;
6381
6382 default:
6383 return bfd_reloc_notsupported;
6384 }
6385
6386 if (saved_addend)
6387 *saved_addend = value;
6388
6389 /* Only apply the final relocation in a sequence. */
6390 if (save_addend)
6391 return bfd_reloc_continue;
6392
6393 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
6394 howto, value);
6395 }
6396
6397 /* LP64 and ILP32 operate on x- and w-registers respectively.
6398 The following definitions take into account the difference between
6399 the corresponding machine encodings.  R means an x-register if the
6400 target arch is LP64, and a w-register if the target is ILP32. */
6401
6402 #if ARCH_SIZE == 64
6403 # define add_R0_R0 (0x91000000)
6404 # define add_R0_R0_R1 (0x8b000020)
6405 # define add_R0_R1 (0x91400020)
6406 # define ldr_R0 (0x58000000)
6407 # define ldr_R0_mask(i) (i & 0xffffffe0)
6408 # define ldr_R0_x0 (0xf9400000)
6409 # define ldr_hw_R0 (0xf2a00000)
6410 # define movk_R0 (0xf2800000)
6411 # define movz_R0 (0xd2a00000)
6412 # define movz_hw_R0 (0xd2c00000)
6413 #else /*ARCH_SIZE == 32 */
6414 # define add_R0_R0 (0x11000000)
6415 # define add_R0_R0_R1 (0x0b000020)
6416 # define add_R0_R1 (0x11400020)
6417 # define ldr_R0 (0x18000000)
6418 # define ldr_R0_mask(i) (i & 0xbfffffe0)
6419 # define ldr_R0_x0 (0xb9400000)
6420 # define ldr_hw_R0 (0x72a00000)
6421 # define movk_R0 (0x72800000)
6422 # define movz_R0 (0x52a00000)
6423 # define movz_hw_R0 (0x52c00000)
6424 #endif
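
/* These little-endian opcode templates are used by the TLS relaxation code
   in elfNN_aarch64_tls_relax below.  The destination register field
   (bits 0-4) is left as zero, so callers that need to preserve the original
   destination register OR it back in with "insn & 0x1f".  */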
6425
6426 /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub;
6427 it is used to identify the stub information to reset. */
6428
6429 struct erratum_843419_branch_to_stub_clear_data
6430 {
6431 bfd_vma adrp_offset;
6432 asection *output_section;
6433 };
6434
6435 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
6436 section inside IN_ARG match.  The clearing is done by setting the
6437 stub_type to none. */
6438
6439 static bool
6440 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
6441 void *in_arg)
6442 {
6443 struct elf_aarch64_stub_hash_entry *stub_entry
6444 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6445 struct erratum_843419_branch_to_stub_clear_data *data
6446 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
6447
6448 if (stub_entry->target_section != data->output_section
6449 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
6450 || stub_entry->adrp_offset != data->adrp_offset)
6451 return true;
6452
6453 /* Change the stub type instead of removing the entry; removing it from the
6454 hash table would be slower and we have already reserved the memory for the
6455 entry, so there wouldn't be much gain.  Changing the stub also keeps around a
6456 record of what was there before. */
6457 stub_entry->stub_type = aarch64_stub_none;
6458
6459 /* We're done and there could have been only one matching stub at that
6460 particular offset, so abort further traversal. */
6461 return false;
6462 }
6463
6464 /* TLS relaxation may relax an adrp sequence that matches the erratum 843419
6465 sequence.  In this case the erratum no longer applies and we need to remove
6466 the entry from the pending stub generation.  This clears the entry for the
6467 adrp insn matching ADRP_OFFSET in INPUT_SECTION from the stub table defined in GLOBALS. */
6468
6469 static void
6470 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
6471 bfd_vma adrp_offset, asection *input_section)
6472 {
6473 if (globals->fix_erratum_843419 & ERRAT_ADRP)
6474 {
6475 struct erratum_843419_branch_to_stub_clear_data data;
6476 data.adrp_offset = adrp_offset;
6477 data.output_section = input_section;
6478
6479 bfd_hash_traverse (&globals->stub_hash_table,
6480 _bfd_aarch64_erratum_843419_clear_stub, &data);
6481 }
6482 }
6483
6484 /* Handle TLS relaxations. Relaxing is possible for symbols that use
6485 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
6486 link.
6487
6488 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
6489 is to then call final_link_relocate. Return other values in the
6490 case of error. */
6491
6492 static bfd_reloc_status_type
6493 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
6494 bfd *input_bfd, asection *input_section,
6495 bfd_byte *contents, Elf_Internal_Rela *rel,
6496 struct elf_link_hash_entry *h,
6497 struct bfd_link_info *info)
6498 {
6499 bool local_exec = bfd_link_executable (info)
6500 && SYMBOL_REFERENCES_LOCAL (info, h);
6501 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
6502 unsigned long insn;
6503
6504 BFD_ASSERT (globals && input_bfd && contents && rel);
6505
6506 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6507 {
6508 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6509 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6510 if (local_exec)
6511 {
6512 /* GD->LE relaxation:
6513 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
6514 or
6515 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
6516
6517 Where R is x for LP64, and w for ILP32. */
6518 bfd_putl32 (movz_R0, contents + rel->r_offset);
6519 /* We have relaxed the adrp into a mov, so we may have to clear any
6520 pending erratum fixes. */
6521 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6522 return bfd_reloc_continue;
6523 }
6524 else
6525 {
6526 /* GD->IE relaxation:
6527 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
6528 or
6529 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
6530 */
6531 return bfd_reloc_continue;
6532 }
6533
6534 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6535 BFD_ASSERT (0);
6536 break;
6537
6538 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6539 if (local_exec)
6540 {
6541 /* Tiny TLSDESC->LE relaxation:
6542 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
6543 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
6544 .tlsdesccall var
6545 blr x1 => nop
6546
6547 Where R is x for LP64, and w for ILP32. */
6548 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6549 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6550
6551 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6552 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6553 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6554
6555 bfd_putl32 (movz_R0, contents + rel->r_offset);
6556 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
6557 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6558 return bfd_reloc_continue;
6559 }
6560 else
6561 {
6562 /* Tiny TLSDESC->IE relaxation:
6563 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
6564 adr x0, :tlsdesc:var => nop
6565 .tlsdesccall var
6566 blr x1 => nop
6567 */
6568 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6569 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6570
6571 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6572 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6573
6574 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6575 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6576 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6577 return bfd_reloc_continue;
6578 }
6579
6580 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6581 if (local_exec)
6582 {
6583 /* Tiny GD->LE relaxation:
6584 adr x0, :tlsgd:var => mrs x1, tpidr_el0
6585 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
6586 nop => add R0, R0, #:tprel_lo12_nc:x
6587
6588 Where R is x for LP64, and w for ILP32. */
6589
6590 /* First kill the tls_get_addr reloc on the bl instruction. */
6591 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6592
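	  /* 0xd53bd041 is the encoding of "mrs x1, tpidr_el0" from the
	     sequence above.  */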
6593 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
6594 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
6595 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
6596
6597 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6598 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
6599 rel[1].r_offset = rel->r_offset + 8;
6600
6601 /* Move the current relocation to the second instruction in
6602 the sequence. */
6603 rel->r_offset += 4;
6604 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6605 AARCH64_R (TLSLE_ADD_TPREL_HI12));
6606 return bfd_reloc_continue;
6607 }
6608 else
6609 {
6610 /* Tiny GD->IE relaxation:
6611 adr x0, :tlsgd:var => ldr R0, :gottprel:var
6612 bl __tls_get_addr => mrs x1, tpidr_el0
6613 nop => add R0, R0, R1
6614
6615 Where R is x for LP64, and w for Ilp32. */
6616
6617 /* First kill the tls_get_addr reloc on the bl instruction. */
6618 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6619 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6620
6621 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6622 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6623 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6624 return bfd_reloc_continue;
6625 }
6626
6627 #if ARCH_SIZE == 64
6628 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6629 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
6630 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
6631 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
6632
6633 if (local_exec)
6634 {
6635 /* Large GD->LE relaxation:
6636 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
6637 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
6638 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
6639 bl __tls_get_addr => mrs x1, tpidr_el0
6640 nop => add x0, x0, x1
6641 */
6642 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6643 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6644 rel[2].r_offset = rel->r_offset + 8;
6645
6646 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
6647 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
6648 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
6649 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6650 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6651 }
6652 else
6653 {
6654 /* Large GD->IE relaxation:
6655 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
6656 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
6657 add x0, gp, x0 => ldr x0, [gp, x0]
6658 bl __tls_get_addr => mrs x1, tpidr_el0
6659 nop => add x0, x0, x1
6660 */
6661 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6662 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
6663 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
6664 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6665 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6666 }
6667 return bfd_reloc_continue;
6668
6669 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6670 return bfd_reloc_continue;
6671 #endif
6672
6673 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6674 return bfd_reloc_continue;
6675
6676 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6677 if (local_exec)
6678 {
6679 /* GD->LE relaxation:
6680 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
6681
6682 Where R is x for lp64 mode, and w for ILP32 mode. */
6683 bfd_putl32 (movk_R0, contents + rel->r_offset);
6684 return bfd_reloc_continue;
6685 }
6686 else
6687 {
6688 /* GD->IE relaxation:
6689 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
6690
6691 Where R is x for lp64 mode, and w for ILP32 mode. */
6692 insn = bfd_getl32 (contents + rel->r_offset);
6693 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6694 return bfd_reloc_continue;
6695 }
6696
6697 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6698 if (local_exec)
6699 {
6700 /* GD->LE relaxation
6701 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
6702 bl __tls_get_addr => mrs x1, tpidr_el0
6703 nop => add R0, R1, R0
6704
6705 Where R is x for lp64 mode, and w for ILP32 mode. */
6706
6707 /* First kill the tls_get_addr reloc on the bl instruction. */
6708 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6709 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6710
6711 bfd_putl32 (movk_R0, contents + rel->r_offset);
6712 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6713 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6714 return bfd_reloc_continue;
6715 }
6716 else
6717 {
6718 /* GD->IE relaxation
6719 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
6720 BL __tls_get_addr => mrs x1, tpidr_el0
6721 R_AARCH64_CALL26
6722 NOP => add R0, R1, R0
6723
6724 Where R is x for lp64 mode, and w for ilp32 mode. */
6725
6726 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6727
6728 /* Remove the relocation on the BL instruction. */
6729 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6730
6731 /* We choose to fix up the BL and NOP instructions using the
6732 offset from the second relocation to allow flexibility in
6733 scheduling instructions between the ADD and BL. */
6734 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
6735 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
6736 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
6737 return bfd_reloc_continue;
6738 }
6739
6740 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6741 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6742 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6743 /* GD->IE/LE relaxation:
6744 add x0, x0, #:tlsdesc_lo12:var => nop
6745 blr xd => nop
6746 */
6747 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
6748 return bfd_reloc_ok;
6749
6750 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6751 if (local_exec)
6752 {
6753 /* GD->LE relaxation:
6754 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
6755
6756 Where R is x for lp64 mode, and w for ILP32 mode. */
6757 bfd_putl32 (movk_R0, contents + rel->r_offset);
6758 return bfd_reloc_continue;
6759 }
6760 else
6761 {
6762 /* GD->IE relaxation:
6763 ldr xd, [gp, xn] => ldr R0, [gp, xn]
6764
6765 Where R is x for lp64 mode, and w for ILP32 mode. */
6766 insn = bfd_getl32 (contents + rel->r_offset);
6767 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6768 return bfd_reloc_ok;
6769 }
6770
6771 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6772 /* GD->LE relaxation:
6773 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
6774 GD->IE relaxation:
6775 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
6776
6777 Where R is x for lp64 mode, and w for ILP32 mode. */
6778 if (local_exec)
6779 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
6780 return bfd_reloc_continue;
6781
6782 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6783 if (local_exec)
6784 {
6785 /* GD->LE relaxation:
6786 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
6787
6788 Where R is x for lp64 mode, and w for ILP32 mode. */
6789 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
6790 return bfd_reloc_continue;
6791 }
6792 else
6793 {
6794 /* GD->IE relaxation:
6795 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
6796
6797 Where R is x for lp64 mode, and w for ILP32 mode. */
6798 insn = bfd_getl32 (contents + rel->r_offset);
6799 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6800 return bfd_reloc_continue;
6801 }
6802
6803 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6804 /* IE->LE relaxation:
6805 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
6806
6807 Where R is x for lp64 mode, and w for ILP32 mode. */
6808 if (local_exec)
6809 {
6810 insn = bfd_getl32 (contents + rel->r_offset);
6811 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6812 /* We have relaxed the adrp into a mov, so we may have to clear any
6813 pending erratum fixes. */
6814 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6815 }
6816 return bfd_reloc_continue;
6817
6818 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6819 /* IE->LE relaxation:
6820 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
6821
6822 Where R is x for lp64 mode, and w for ILP32 mode. */
6823 if (local_exec)
6824 {
6825 insn = bfd_getl32 (contents + rel->r_offset);
6826 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
6827 }
6828 return bfd_reloc_continue;
6829
6830 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6831 /* LD->LE relaxation (tiny):
6832 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6833 bl __tls_get_addr => add R0, R0, TCB_SIZE
6834
6835 Where R is x for lp64 mode, and w for ilp32 mode. */
6836 if (local_exec)
6837 {
6838 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6839 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6840 /* No need for the CALL26 relocation against tls_get_addr. */
6841 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6842 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6843 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6844 contents + rel->r_offset + 4);
6845 return bfd_reloc_ok;
6846 }
6847 return bfd_reloc_continue;
6848
6849 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6850 /* LD->LE relaxation (small):
6851 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6852 */
6853 if (local_exec)
6854 {
6855 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6856 return bfd_reloc_ok;
6857 }
6858 return bfd_reloc_continue;
6859
6860 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6861 /* LD->LE relaxation (small):
6862 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6863 bl __tls_get_addr => nop
6864
6865 Where R is x for lp64 mode, and w for ilp32 mode. */
6866 if (local_exec)
6867 {
6868 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6869 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6870 /* No need for the CALL26 relocation against tls_get_addr. */
6871 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6872 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6873 contents + rel->r_offset + 0);
6874 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6875 return bfd_reloc_ok;
6876 }
6877 return bfd_reloc_continue;
6878
6879 default:
6880 return bfd_reloc_continue;
6881 }
6882
6883 return bfd_reloc_ok;
6884 }
6885
6886 /* Relocate an AArch64 ELF section. */
6887
6888 static int
6889 elfNN_aarch64_relocate_section (bfd *output_bfd,
6890 struct bfd_link_info *info,
6891 bfd *input_bfd,
6892 asection *input_section,
6893 bfd_byte *contents,
6894 Elf_Internal_Rela *relocs,
6895 Elf_Internal_Sym *local_syms,
6896 asection **local_sections)
6897 {
6898 Elf_Internal_Shdr *symtab_hdr;
6899 struct elf_link_hash_entry **sym_hashes;
6900 Elf_Internal_Rela *rel;
6901 Elf_Internal_Rela *relend;
6902 const char *name;
6903 struct elf_aarch64_link_hash_table *globals;
6904 bool save_addend = false;
6905 bfd_vma addend = 0;
6906
6907 globals = elf_aarch64_hash_table (info);
6908
6909 symtab_hdr = &elf_symtab_hdr (input_bfd);
6910 sym_hashes = elf_sym_hashes (input_bfd);
6911
6912 rel = relocs;
6913 relend = relocs + input_section->reloc_count;
6914 for (; rel < relend; rel++)
6915 {
6916 unsigned int r_type;
6917 bfd_reloc_code_real_type bfd_r_type;
6918 bfd_reloc_code_real_type relaxed_bfd_r_type;
6919 reloc_howto_type *howto;
6920 unsigned long r_symndx;
6921 Elf_Internal_Sym *sym;
6922 asection *sec;
6923 struct elf_link_hash_entry *h;
6924 bfd_vma relocation;
6925 bfd_reloc_status_type r;
6926 arelent bfd_reloc;
6927 char sym_type;
6928 bool unresolved_reloc = false;
6929 char *error_message = NULL;
6930
6931 r_symndx = ELFNN_R_SYM (rel->r_info);
6932 r_type = ELFNN_R_TYPE (rel->r_info);
6933
6934 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
6935 howto = bfd_reloc.howto;
6936
6937 if (howto == NULL)
6938 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
6939
6940 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6941
6942 h = NULL;
6943 sym = NULL;
6944 sec = NULL;
6945
6946 if (r_symndx < symtab_hdr->sh_info)
6947 {
6948 sym = local_syms + r_symndx;
6949 sym_type = ELFNN_ST_TYPE (sym->st_info);
6950 sec = local_sections[r_symndx];
6951
6952 /* An object file might have a reference to a local
6953 undefined symbol. This is a daft object file, but we
6954 should at least do something about it. */
6955 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6956 && bfd_is_und_section (sec)
6957 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6958 (*info->callbacks->undefined_symbol)
6959 (info, bfd_elf_string_from_elf_section
6960 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6961 input_bfd, input_section, rel->r_offset, true);
6962
6963 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6964
6965 /* Relocate against local STT_GNU_IFUNC symbol. */
6966 if (!bfd_link_relocatable (info)
6967 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6968 {
6969 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6970 rel, false);
6971 if (h == NULL)
6972 abort ();
6973
6974 /* Set STT_GNU_IFUNC symbol value. */
6975 h->root.u.def.value = sym->st_value;
6976 h->root.u.def.section = sec;
6977 }
6978 }
6979 else
6980 {
6981 bool warned, ignored;
6982
6983 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6984 r_symndx, symtab_hdr, sym_hashes,
6985 h, sec, relocation,
6986 unresolved_reloc, warned, ignored);
6987
6988 sym_type = h->type;
6989 }
6990
6991 if (sec != NULL && discarded_section (sec))
6992 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6993 rel, 1, relend, howto, 0, contents);
6994
6995 if (bfd_link_relocatable (info))
6996 continue;
6997
6998 if (h != NULL)
6999 name = h->root.root.string;
7000 else
7001 {
7002 name = (bfd_elf_string_from_elf_section
7003 (input_bfd, symtab_hdr->sh_link, sym->st_name));
7004 if (name == NULL || *name == '\0')
7005 name = bfd_section_name (sec);
7006 }
7007
7008 if (r_symndx != 0
7009 && r_type != R_AARCH64_NONE
7010 && r_type != R_AARCH64_NULL
7011 && (h == NULL
7012 || h->root.type == bfd_link_hash_defined
7013 || h->root.type == bfd_link_hash_defweak)
7014 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
7015 {
7016 _bfd_error_handler
7017 ((sym_type == STT_TLS
7018 /* xgettext:c-format */
7019 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
7020 /* xgettext:c-format */
7021 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
7022 input_bfd,
7023 input_section, (uint64_t) rel->r_offset, howto->name, name);
7024 }
7025
7026 /* We relax only if we can see that there can be a valid transition
7027 from one reloc type to another.
7028 We call elfNN_aarch64_final_link_relocate unless we're completely
7029 done, i.e., the relaxation produced the final output we want. */
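/* For instance, on a local-exec target the GD and TLSDESC sequences handled
   by elfNN_aarch64_tls_relax above are rewritten into direct TPREL moves;
   most of the rewritten instructions are then still processed by
   elfNN_aarch64_final_link_relocate with the relaxed reloc type. */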
7030
7031 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
7032 h, r_symndx);
7033 if (relaxed_bfd_r_type != bfd_r_type)
7034 {
7035 bfd_r_type = relaxed_bfd_r_type;
7036 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
7037 BFD_ASSERT (howto != NULL);
7038 r_type = howto->type;
7039 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
7040 contents, rel, h, info);
7041 unresolved_reloc = 0;
7042 }
7043 else
7044 r = bfd_reloc_continue;
7045
7046 /* There may be multiple consecutive relocations for the
7047 same offset. In that case we are supposed to treat the
7048 output of each relocation as the addend for the next. */
7049 if (rel + 1 < relend
7050 && rel->r_offset == rel[1].r_offset
7051 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
7052 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
7053 save_addend = true;
7054 else
7055 save_addend = false;
7056
7057 if (r == bfd_reloc_continue)
7058 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
7059 input_section, contents, rel,
7060 relocation, info, sec,
7061 h, &unresolved_reloc,
7062 save_addend, &addend, sym);
7063
7064 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
7065 {
7066 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7067 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7068 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7069 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7070 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7071 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7072 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7073 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7074 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7075 {
7076 bool need_relocs = false;
7077 bfd_byte *loc;
7078 int indx;
7079 bfd_vma off;
7080
7081 off = symbol_got_offset (input_bfd, h, r_symndx);
7082 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7083
7084 need_relocs =
7085 (!bfd_link_executable (info) || indx != 0) &&
7086 (h == NULL
7087 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7088 || h->root.type != bfd_link_hash_undefweak);
7089
7090 BFD_ASSERT (globals->root.srelgot != NULL);
7091
7092 if (need_relocs)
7093 {
7094 Elf_Internal_Rela rela;
7095 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
7096 rela.r_addend = 0;
7097 rela.r_offset = globals->root.sgot->output_section->vma +
7098 globals->root.sgot->output_offset + off;
7099
7100
7101 loc = globals->root.srelgot->contents;
7102 loc += globals->root.srelgot->reloc_count++
7103 * RELOC_SIZE (htab);
7104 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7105
7106 bfd_reloc_code_real_type real_type =
7107 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7108
7109 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
7110 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
7111 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
7112 {
7113 /* For local dynamic, don't generate DTPREL in any case.
7114 Initialize the DTPREL slot to zero, so we get the module
7115 base address when the runtime TLS resolver is invoked. */
7116 bfd_put_NN (output_bfd, 0,
7117 globals->root.sgot->contents + off
7118 + GOT_ENTRY_SIZE);
7119 }
7120 else if (indx == 0)
7121 {
7122 bfd_put_NN (output_bfd,
7123 relocation - dtpoff_base (info),
7124 globals->root.sgot->contents + off
7125 + GOT_ENTRY_SIZE);
7126 }
7127 else
7128 {
7129 /* This TLS symbol is global. We emit a
7130 relocation to fixup the tls offset at load
7131 time. */
7132 rela.r_info =
7133 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
7134 rela.r_addend = 0;
7135 rela.r_offset =
7136 (globals->root.sgot->output_section->vma
7137 + globals->root.sgot->output_offset + off
7138 + GOT_ENTRY_SIZE);
7139
7140 loc = globals->root.srelgot->contents;
7141 loc += globals->root.srelgot->reloc_count++
7142 * RELOC_SIZE (globals);
7143 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7144 bfd_put_NN (output_bfd, (bfd_vma) 0,
7145 globals->root.sgot->contents + off
7146 + GOT_ENTRY_SIZE);
7147 }
7148 }
7149 else
7150 {
7151 bfd_put_NN (output_bfd, (bfd_vma) 1,
7152 globals->root.sgot->contents + off);
7153 bfd_put_NN (output_bfd,
7154 relocation - dtpoff_base (info),
7155 globals->root.sgot->contents + off
7156 + GOT_ENTRY_SIZE);
7157 }
7158
7159 symbol_got_offset_mark (input_bfd, h, r_symndx);
7160 }
7161 break;
7162
7163 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7164 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
7165 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7166 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7167 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7168 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7169 {
7170 bool need_relocs = false;
7171 bfd_byte *loc;
7172 int indx;
7173 bfd_vma off;
7174
7175 off = symbol_got_offset (input_bfd, h, r_symndx);
7176
7177 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7178
7179 need_relocs =
7180 (!bfd_link_executable (info) || indx != 0) &&
7181 (h == NULL
7182 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7183 || h->root.type != bfd_link_hash_undefweak);
7184
7185 BFD_ASSERT (globals->root.srelgot != NULL);
7186
7187 if (need_relocs)
7188 {
7189 Elf_Internal_Rela rela;
7190
7191 if (indx == 0)
7192 rela.r_addend = relocation - dtpoff_base (info);
7193 else
7194 rela.r_addend = 0;
7195
7196 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
7197 rela.r_offset = globals->root.sgot->output_section->vma +
7198 globals->root.sgot->output_offset + off;
7199
7200 loc = globals->root.srelgot->contents;
7201 loc += globals->root.srelgot->reloc_count++
7202 * RELOC_SIZE (htab);
7203
7204 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7205
7206 bfd_put_NN (output_bfd, rela.r_addend,
7207 globals->root.sgot->contents + off);
7208 }
7209 else
7210 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
7211 globals->root.sgot->contents + off);
7212
7213 symbol_got_offset_mark (input_bfd, h, r_symndx);
7214 }
7215 break;
7216
7217 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7218 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7219 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7220 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
7221 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7222 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7223 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7224 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
7225 {
7226 bool need_relocs = false;
7227 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
7228 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
7229
7230 need_relocs = (h == NULL
7231 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7232 || h->root.type != bfd_link_hash_undefweak);
7233
7234 BFD_ASSERT (globals->root.srelgot != NULL);
7235 BFD_ASSERT (globals->root.sgot != NULL);
7236
7237 if (need_relocs)
7238 {
7239 bfd_byte *loc;
7240 Elf_Internal_Rela rela;
7241 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
7242
7243 rela.r_addend = 0;
7244 rela.r_offset = (globals->root.sgotplt->output_section->vma
7245 + globals->root.sgotplt->output_offset
7246 + off + globals->sgotplt_jump_table_size);
7247
7248 if (indx == 0)
7249 rela.r_addend = relocation - dtpoff_base (info);
7250
7251 /* Allocate the next available slot in the PLT reloc
7252 section to hold our R_AARCH64_TLSDESC, the next
7253 available slot is determined from reloc_count,
7254 which we step. But note, reloc_count was
7255 artificially moved down while allocating slots for
7256 real PLT relocs such that all of the PLT relocs
7257 will fit above the initial reloc_count and the
7258 extra stuff will fit below. */
7259 loc = globals->root.srelplt->contents;
7260 loc += globals->root.srelplt->reloc_count++
7261 * RELOC_SIZE (globals);
7262
7263 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7264
7265 bfd_put_NN (output_bfd, (bfd_vma) 0,
7266 globals->root.sgotplt->contents + off +
7267 globals->sgotplt_jump_table_size);
7268 bfd_put_NN (output_bfd, (bfd_vma) 0,
7269 globals->root.sgotplt->contents + off +
7270 globals->sgotplt_jump_table_size +
7271 GOT_ENTRY_SIZE);
7272 }
7273
7274 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
7275 }
7276 break;
7277 default:
7278 break;
7279 }
7280
7281 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
7282 because such sections are not SEC_ALLOC and thus ld.so will
7283 not process them. */
7284 if (unresolved_reloc
7285 && !((input_section->flags & SEC_DEBUGGING) != 0
7286 && h->def_dynamic)
7287 && _bfd_elf_section_offset (output_bfd, info, input_section,
7288 +rel->r_offset) != (bfd_vma) - 1)
7289 {
7290 _bfd_error_handler
7291 /* xgettext:c-format */
7292 (_("%pB(%pA+%#" PRIx64 "): "
7293 "unresolvable %s relocation against symbol `%s'"),
7294 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
7295 h->root.root.string);
7296 return false;
7297 }
7298
7299 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
7300 {
7301 bfd_reloc_code_real_type real_r_type
7302 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7303
7304 switch (r)
7305 {
7306 case bfd_reloc_overflow:
7307 (*info->callbacks->reloc_overflow)
7308 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
7309 input_bfd, input_section, rel->r_offset);
7310 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
7311 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
7312 {
7313 (*info->callbacks->warning)
7314 (info,
7315 _("too many GOT entries for -fpic, "
7316 "please recompile with -fPIC"),
7317 name, input_bfd, input_section, rel->r_offset);
7318 return false;
7319 }
7320 /* Overflow can occur when a variable is referenced with a type
7321 that has a larger alignment than the type with which it was
7322 declared. eg:
7323 file1.c: extern int foo; int a (void) { return foo; }
7324 file2.c: char bar, foo, baz;
7325 If the variable is placed into a data section at an offset
7326 that is incompatible with the larger alignment requirement
7327 overflow will occur. (Strictly speaking this is not overflow
7328 but rather an alignment problem, but the bfd_reloc_ error
7329 enum does not have a value to cover that situation).
7330
7331 Try to catch this situation here and provide a more helpful
7332 error message to the user. */
7333 if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
7334 /* FIXME: Are we testing all of the appropriate reloc
7335 types here ? */
7336 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
7337 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
7338 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
7339 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
7340 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
7341 {
7342 info->callbacks->warning
7343 (info, _("one possible cause of this error is that the \
7344 symbol is being referenced in the indicated code as if it had a larger \
7345 alignment than was declared where it was defined"),
7346 name, input_bfd, input_section, rel->r_offset);
7347 }
7348 break;
7349
7350 case bfd_reloc_undefined:
7351 (*info->callbacks->undefined_symbol)
7352 (info, name, input_bfd, input_section, rel->r_offset, true);
7353 break;
7354
7355 case bfd_reloc_outofrange:
7356 error_message = _("out of range");
7357 goto common_error;
7358
7359 case bfd_reloc_notsupported:
7360 error_message = _("unsupported relocation");
7361 goto common_error;
7362
7363 case bfd_reloc_dangerous:
7364 /* error_message should already be set. */
7365 goto common_error;
7366
7367 default:
7368 error_message = _("unknown error");
7369 /* Fall through. */
7370
7371 common_error:
7372 BFD_ASSERT (error_message != NULL);
7373 (*info->callbacks->reloc_dangerous)
7374 (info, error_message, input_bfd, input_section, rel->r_offset);
7375 break;
7376 }
7377 }
7378
7379 if (!save_addend)
7380 addend = 0;
7381 }
7382
7383 return true;
7384 }
7385
7386 /* Set the right machine number. */
7387
7388 static bool
7389 elfNN_aarch64_object_p (bfd *abfd)
7390 {
7391 #if ARCH_SIZE == 32
7392 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7393 #else
7394 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7395 #endif
7396 return true;
7397 }
7398
7399 /* Function to keep AArch64 specific flags in the ELF header. */
7400
7401 static bool
7402 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7403 {
7404 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7405 {
7406 }
7407 else
7408 {
7409 elf_elfheader (abfd)->e_flags = flags;
7410 elf_flags_init (abfd) = true;
7411 }
7412
7413 return true;
7414 }
7415
7416 /* Merge backend specific data from an object file to the output
7417 object file when linking. */
7418
7419 static bool
7420 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7421 {
7422 bfd *obfd = info->output_bfd;
7423 flagword out_flags;
7424 flagword in_flags;
7425 bool flags_compatible = true;
7426 asection *sec;
7427
7428 /* Check if we have the same endianness. */
7429 if (!_bfd_generic_verify_endian_match (ibfd, info))
7430 return false;
7431
7432 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7433 return true;
7434
7435 /* The input BFD must have had its flags initialised. */
7436 /* The following seems bogus to me -- The flags are initialized in
7437 the assembler but I don't think an elf_flags_init field is
7438 written into the object. */
7439 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7440
7441 in_flags = elf_elfheader (ibfd)->e_flags;
7442 out_flags = elf_elfheader (obfd)->e_flags;
7443
7444 if (!elf_flags_init (obfd))
7445 {
7446 /* If the input is the default architecture and had the default
7447 flags then do not bother setting the flags for the output
7448 architecture; instead allow future merges to do this. If no
7449 future merges ever set these flags then they will retain their
7450 uninitialised values, which, surprise surprise, correspond
7451 to the default values. */
7452 if (bfd_get_arch_info (ibfd)->the_default
7453 && elf_elfheader (ibfd)->e_flags == 0)
7454 return true;
7455
7456 elf_flags_init (obfd) = true;
7457 elf_elfheader (obfd)->e_flags = in_flags;
7458
7459 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7460 && bfd_get_arch_info (obfd)->the_default)
7461 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7462 bfd_get_mach (ibfd));
7463
7464 return true;
7465 }
7466
7467 /* Identical flags must be compatible. */
7468 if (in_flags == out_flags)
7469 return true;
7470
7471 /* Check to see if the input BFD actually contains any sections. If
7472 not, its flags may not have been initialised either, but it
7473 cannot actually cause any incompatibility. Do not short-circuit
7474 dynamic objects; their section list may be emptied by
7475 elf_link_add_object_symbols.
7476
7477 Also check to see if there are no code sections in the input.
7478 In this case there is no need to check for code specific flags.
7479 XXX - do we need to worry about floating-point format compatibility
7480 in data sections ? */
7481 if (!(ibfd->flags & DYNAMIC))
7482 {
7483 bool null_input_bfd = true;
7484 bool only_data_sections = true;
7485
7486 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
7487 {
7488 if ((bfd_section_flags (sec)
7489 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7490 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7491 only_data_sections = false;
7492
7493 null_input_bfd = false;
7494 break;
7495 }
7496
7497 if (null_input_bfd || only_data_sections)
7498 return true;
7499 }
7500
7501 return flags_compatible;
7502 }
7503
7504 /* Display the flags field. */
7505
7506 static bool
7507 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
7508 {
7509 FILE *file = (FILE *) ptr;
7510 unsigned long flags;
7511
7512 BFD_ASSERT (abfd != NULL && ptr != NULL);
7513
7514 /* Print normal ELF private data. */
7515 _bfd_elf_print_private_bfd_data (abfd, ptr);
7516
7517 flags = elf_elfheader (abfd)->e_flags;
7518 /* Ignore init flag - it may not be set, despite the flags field
7519 containing valid data. */
7520
7521 /* xgettext:c-format */
7522 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
7523
7524 if (flags)
7525 fprintf (file, _(" <Unrecognised flag bits set>"));
7526
7527 fputc ('\n', file);
7528
7529 return true;
7530 }
7531
7532 /* Return true if we need copy relocation against EH. */
7533
7534 static bool
7535 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
7536 {
7537 struct elf_dyn_relocs *p;
7538 asection *s;
7539
7540 for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
7541 {
7542 /* If there is any pc-relative reference, we need to keep the copy
7543 relocation to avoid propagating the relocation into the runtime,
7544 which current glibc does not support. */
7545 if (p->pc_count)
7546 return true;
7547
7548 s = p->sec->output_section;
7549 /* Need copy relocation if it's against read-only section. */
7550 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7551 return true;
7552 }
7553
7554 return false;
7555 }
7556
7557 /* Adjust a symbol defined by a dynamic object and referenced by a
7558 regular object. The current definition is in some section of the
7559 dynamic object, but we're not including those sections. We have to
7560 change the definition to something the rest of the link can
7561 understand. */
7562
7563 static bool
7564 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
7565 struct elf_link_hash_entry *h)
7566 {
7567 struct elf_aarch64_link_hash_table *htab;
7568 asection *s, *srel;
7569
7570 /* If this is a function, put it in the procedure linkage table. We
7571 will fill in the contents of the procedure linkage table later,
7572 when we know the address of the .got section. */
7573 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
7574 {
7575 if (h->plt.refcount <= 0
7576 || (h->type != STT_GNU_IFUNC
7577 && (SYMBOL_CALLS_LOCAL (info, h)
7578 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
7579 && h->root.type == bfd_link_hash_undefweak))))
7580 {
7581 /* This case can occur if we saw a CALL26 reloc in
7582 an input file, but the symbol wasn't referred to
7583 by a dynamic object or all references were
7584 garbage collected. In that case we can end up
7585 resolving the call directly, without a PLT entry. */
7586 h->plt.offset = (bfd_vma) - 1;
7587 h->needs_plt = 0;
7588 }
7589
7590 return true;
7591 }
7592 else
7593 /* Otherwise, reset to -1. */
7594 h->plt.offset = (bfd_vma) - 1;
7595
7596
7597 /* If this is a weak symbol, and there is a real definition, the
7598 processor independent code will have arranged for us to see the
7599 real definition first, and we can just use the same value. */
7600 if (h->is_weakalias)
7601 {
7602 struct elf_link_hash_entry *def = weakdef (h);
7603 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
7604 h->root.u.def.section = def->root.u.def.section;
7605 h->root.u.def.value = def->root.u.def.value;
7606 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
7607 h->non_got_ref = def->non_got_ref;
7608 return true;
7609 }
7610
7611 /* If we are creating a shared library, we must presume that the
7612 only references to the symbol are via the global offset table.
7613 For such cases we need not do anything here; the relocations will
7614 be handled correctly by relocate_section. */
7615 if (bfd_link_pic (info))
7616 return true;
7617
7618 /* If there are no references to this symbol that do not use the
7619 GOT, we don't need to generate a copy reloc. */
7620 if (!h->non_got_ref)
7621 return true;
7622
7623 /* If -z nocopyreloc was given, we won't generate them either. */
7624 if (info->nocopyreloc)
7625 {
7626 h->non_got_ref = 0;
7627 return true;
7628 }
7629
7630 if (ELIMINATE_COPY_RELOCS)
7631 {
7632 struct elf_aarch64_link_hash_entry *eh;
7633 /* If we don't find any dynamic relocs in read-only sections, then
7634 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
7635 eh = (struct elf_aarch64_link_hash_entry *) h;
7636 if (!need_copy_relocation_p (eh))
7637 {
7638 h->non_got_ref = 0;
7639 return true;
7640 }
7641 }
7642
7643 /* We must allocate the symbol in our .dynbss section, which will
7644 become part of the .bss section of the executable. There will be
7645 an entry for this symbol in the .dynsym section. The dynamic
7646 object will contain position independent code, so all references
7647 from the dynamic object to this symbol will go through the global
7648 offset table. The dynamic linker will use the .dynsym entry to
7649 determine the address it must put in the global offset table, so
7650 both the dynamic object and the regular object will refer to the
7651 same memory location for the variable. */
7652
7653 htab = elf_aarch64_hash_table (info);
7654
7655 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
7656 to copy the initial value out of the dynamic object and into the
7657 runtime process image. */
7658 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
7659 {
7660 s = htab->root.sdynrelro;
7661 srel = htab->root.sreldynrelro;
7662 }
7663 else
7664 {
7665 s = htab->root.sdynbss;
7666 srel = htab->root.srelbss;
7667 }
7668 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
7669 {
7670 srel->size += RELOC_SIZE (htab);
7671 h->needs_copy = 1;
7672 }
7673
7674 return _bfd_elf_adjust_dynamic_copy (info, h, s);
7675
7676 }
7677
7678 static bool
7679 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
7680 {
7681 struct elf_aarch64_local_symbol *locals;
7682 locals = elf_aarch64_locals (abfd);
7683 if (locals == NULL)
7684 {
7685 locals = (struct elf_aarch64_local_symbol *)
7686 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
7687 if (locals == NULL)
7688 return false;
7689 elf_aarch64_locals (abfd) = locals;
7690 }
7691 return true;
7692 }
7693
7694 /* Create the .got section to hold the global offset table. */
7695
7696 static bool
7697 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
7698 {
7699 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
7700 flagword flags;
7701 asection *s;
7702 struct elf_link_hash_entry *h;
7703 struct elf_link_hash_table *htab = elf_hash_table (info);
7704
7705 /* This function may be called more than once. */
7706 if (htab->sgot != NULL)
7707 return true;
7708
7709 flags = bed->dynamic_sec_flags;
7710
7711 s = bfd_make_section_anyway_with_flags (abfd,
7712 (bed->rela_plts_and_copies_p
7713 ? ".rela.got" : ".rel.got"),
7714 (bed->dynamic_sec_flags
7715 | SEC_READONLY));
7716 if (s == NULL
7717 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7718 return false;
7719 htab->srelgot = s;
7720
7721 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7722 if (s == NULL
7723 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7724 return false;
7725 htab->sgot = s;
7726 htab->sgot->size += GOT_ENTRY_SIZE;
7727
7728 if (bed->want_got_sym)
7729 {
7730 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7731 (or .got.plt) section. We don't do this in the linker script
7732 because we don't want to define the symbol if we are not creating
7733 a global offset table. */
7734 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7735 "_GLOBAL_OFFSET_TABLE_");
7736 elf_hash_table (info)->hgot = h;
7737 if (h == NULL)
7738 return false;
7739 }
7740
7741 if (bed->want_got_plt)
7742 {
7743 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7744 if (s == NULL
7745 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7746 return false;
7747 htab->sgotplt = s;
7748 }
7749
7750 /* The first bit of the global offset table is the header. */
7751 s->size += bed->got_header_size;
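/* The reserved header slots are used for lazy PLT resolution: typically the
   first holds the address of the dynamic section (written when the dynamic
   sections are finalized) and the dynamic linker fills in the rest at run
   time. */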
7752
7753 return true;
7754 }
7755
7756 /* Look through the relocs for a section during the first phase. */
7757
7758 static bool
7759 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7760 asection *sec, const Elf_Internal_Rela *relocs)
7761 {
7762 Elf_Internal_Shdr *symtab_hdr;
7763 struct elf_link_hash_entry **sym_hashes;
7764 const Elf_Internal_Rela *rel;
7765 const Elf_Internal_Rela *rel_end;
7766 asection *sreloc;
7767
7768 struct elf_aarch64_link_hash_table *htab;
7769
7770 if (bfd_link_relocatable (info))
7771 return true;
7772
7773 BFD_ASSERT (is_aarch64_elf (abfd));
7774
7775 htab = elf_aarch64_hash_table (info);
7776 sreloc = NULL;
7777
7778 symtab_hdr = &elf_symtab_hdr (abfd);
7779 sym_hashes = elf_sym_hashes (abfd);
7780
7781 rel_end = relocs + sec->reloc_count;
7782 for (rel = relocs; rel < rel_end; rel++)
7783 {
7784 struct elf_link_hash_entry *h;
7785 unsigned int r_symndx;
7786 unsigned int r_type;
7787 bfd_reloc_code_real_type bfd_r_type;
7788 Elf_Internal_Sym *isym;
7789
7790 r_symndx = ELFNN_R_SYM (rel->r_info);
7791 r_type = ELFNN_R_TYPE (rel->r_info);
7792
7793 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7794 {
7795 /* xgettext:c-format */
7796 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
7797 return false;
7798 }
7799
7800 if (r_symndx < symtab_hdr->sh_info)
7801 {
7802 /* A local symbol. */
7803 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
7804 abfd, r_symndx);
7805 if (isym == NULL)
7806 return false;
7807
7808 /* Check relocation against local STT_GNU_IFUNC symbol. */
7809 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7810 {
7811 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7812 true);
7813 if (h == NULL)
7814 return false;
7815
7816 /* Fake a STT_GNU_IFUNC symbol. */
7817 h->type = STT_GNU_IFUNC;
7818 h->def_regular = 1;
7819 h->ref_regular = 1;
7820 h->forced_local = 1;
7821 h->root.type = bfd_link_hash_defined;
7822 }
7823 else
7824 h = NULL;
7825 }
7826 else
7827 {
7828 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7829 while (h->root.type == bfd_link_hash_indirect
7830 || h->root.type == bfd_link_hash_warning)
7831 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7832 }
7833
7834 /* Could be done earlier, if h were already available. */
7835 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7836
7837 if (h != NULL)
7838 {
7839 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7840 This shows up in particular in an R_AARCH64_PREL64 in the large model,
7841 when calculating the pc-relative address of the .got section, which is
7842 used to initialize the gp register. */
7843 if (h->root.root.string
7844 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7845 {
7846 if (htab->root.dynobj == NULL)
7847 htab->root.dynobj = abfd;
7848
7849 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7850 return false;
7851
7852 BFD_ASSERT (h == htab->root.hgot);
7853 }
7854
7855 /* Create the ifunc sections for static executables. If we
7856 never see an indirect function symbol and we are not building
7857 a static executable, those sections will be empty and
7858 won't appear in the output. */
7859 switch (bfd_r_type)
7860 {
7861 default:
7862 break;
7863
7864 case BFD_RELOC_AARCH64_ADD_LO12:
7865 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7866 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7867 case BFD_RELOC_AARCH64_CALL26:
7868 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7869 case BFD_RELOC_AARCH64_JUMP26:
7870 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7871 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7872 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7873 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7874 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7875 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7876 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7877 case BFD_RELOC_AARCH64_NN:
7878 if (htab->root.dynobj == NULL)
7879 htab->root.dynobj = abfd;
7880 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7881 return false;
7882 break;
7883 }
7884
7885 /* It is referenced by a non-shared object. */
7886 h->ref_regular = 1;
7887 }
7888
7889 switch (bfd_r_type)
7890 {
7891 case BFD_RELOC_AARCH64_16:
7892 #if ARCH_SIZE == 64
7893 case BFD_RELOC_AARCH64_32:
7894 #endif
7895 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
7896 {
7897 if (h != NULL
7898 /* This is an absolute symbol. It represents a value instead
7899 of an address. */
7900 && (bfd_is_abs_symbol (&h->root)
7901 /* This is an undefined symbol. */
7902 || h->root.type == bfd_link_hash_undefined))
7903 break;
7904
7905 /* For local symbols and for global symbols defined in a non-ABS
7906 section, the value is assumed to be an address. */
7907 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7908 _bfd_error_handler
7909 /* xgettext:c-format */
7910 (_("%pB: relocation %s against `%s' can not be used when making "
7911 "a shared object"),
7912 abfd, elfNN_aarch64_howto_table[howto_index].name,
7913 (h) ? h->root.root.string : "a local symbol");
7914 bfd_set_error (bfd_error_bad_value);
7915 return false;
7916 }
7917 else
7918 break;
7919
7920 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7921 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7922 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7923 case BFD_RELOC_AARCH64_MOVW_G3:
7924 if (bfd_link_pic (info))
7925 {
7926 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7927 _bfd_error_handler
7928 /* xgettext:c-format */
7929 (_("%pB: relocation %s against `%s' can not be used when making "
7930 "a shared object; recompile with -fPIC"),
7931 abfd, elfNN_aarch64_howto_table[howto_index].name,
7932 (h) ? h->root.root.string : "a local symbol");
7933 bfd_set_error (bfd_error_bad_value);
7934 return false;
7935 }
7936 /* Fall through. */
7937
7938 case BFD_RELOC_AARCH64_16_PCREL:
7939 case BFD_RELOC_AARCH64_32_PCREL:
7940 case BFD_RELOC_AARCH64_64_PCREL:
7941 case BFD_RELOC_AARCH64_ADD_LO12:
7942 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7943 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7944 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7945 case BFD_RELOC_AARCH64_LDST128_LO12:
7946 case BFD_RELOC_AARCH64_LDST16_LO12:
7947 case BFD_RELOC_AARCH64_LDST32_LO12:
7948 case BFD_RELOC_AARCH64_LDST64_LO12:
7949 case BFD_RELOC_AARCH64_LDST8_LO12:
7950 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7951 if (h == NULL || bfd_link_pic (info))
7952 break;
7953 /* Fall through. */
7954
7955 case BFD_RELOC_AARCH64_NN:
7956
7957 /* We don't need to handle relocs into sections not going into
7958 the "real" output. */
7959 if ((sec->flags & SEC_ALLOC) == 0)
7960 break;
7961
7962 if (h != NULL)
7963 {
7964 if (!bfd_link_pic (info))
7965 h->non_got_ref = 1;
7966
7967 h->plt.refcount += 1;
7968 h->pointer_equality_needed = 1;
7969 }
7970
7971 /* No need to do anything if we're not creating a shared
7972 object. */
7973 if (!(bfd_link_pic (info)
7974 /* If on the other hand, we are creating an executable, we
7975 may need to keep relocations for symbols satisfied by a
7976 dynamic library if we manage to avoid copy relocs for the
7977 symbol.
7978
7979 NOTE: Currently, there is no support of copy relocs
7980 elimination on pc-relative relocation types, because there is
7981 no dynamic relocation support for them in glibc. We still
7982 record the dynamic symbol reference for them. This is
7983 because one symbol may be referenced by both absolute
7984 relocation (for example, BFD_RELOC_AARCH64_NN) and
7985 pc-relative relocation. We need full symbol reference
7986 information to make correct decision later in
7987 elfNN_aarch64_adjust_dynamic_symbol. */
7988 || (ELIMINATE_COPY_RELOCS
7989 && !bfd_link_pic (info)
7990 && h != NULL
7991 && (h->root.type == bfd_link_hash_defweak
7992 || !h->def_regular))))
7993 break;
7994
7995 {
7996 struct elf_dyn_relocs *p;
7997 struct elf_dyn_relocs **head;
7998 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7999
8000 /* We must copy these reloc types into the output file.
8001 Create a reloc section in dynobj and make room for
8002 this reloc. */
8003 if (sreloc == NULL)
8004 {
8005 if (htab->root.dynobj == NULL)
8006 htab->root.dynobj = abfd;
8007
8008 sreloc = _bfd_elf_make_dynamic_reloc_section
8009 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
8010
8011 if (sreloc == NULL)
8012 return false;
8013 }
8014
8015 /* If this is a global symbol, we count the number of
8016 relocations we need for this symbol. */
8017 if (h != NULL)
8018 {
8019 head = &h->dyn_relocs;
8020 }
8021 else
8022 {
8023 /* Track dynamic relocs needed for local syms too.
8024 We really need local syms available to do this
8025 easily. Oh well. */
8026
8027 asection *s;
8028 void **vpp;
8029
8030 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
8031 abfd, r_symndx);
8032 if (isym == NULL)
8033 return false;
8034
8035 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
8036 if (s == NULL)
8037 s = sec;
8038
8039 /* Beware of type punned pointers vs strict aliasing
8040 rules. */
8041 vpp = &(elf_section_data (s)->local_dynrel);
8042 head = (struct elf_dyn_relocs **) vpp;
8043 }
8044
8045 p = *head;
8046 if (p == NULL || p->sec != sec)
8047 {
8048 size_t amt = sizeof *p;
8049 p = ((struct elf_dyn_relocs *)
8050 bfd_zalloc (htab->root.dynobj, amt));
8051 if (p == NULL)
8052 return false;
8053 p->next = *head;
8054 *head = p;
8055 p->sec = sec;
8056 }
8057
8058 p->count += 1;
8059
8060 if (elfNN_aarch64_howto_table[howto_index].pc_relative)
8061 p->pc_count += 1;
8062 }
8063 break;
8064
8065 /* RR: We probably want to keep a consistency check that
8066 there are no dangling GOT_PAGE relocs. */
8067 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8068 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8069 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8070 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8071 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8072 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8073 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8074 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8075 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8076 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8077 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8078 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8079 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8080 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8081 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8082 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8083 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8084 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8085 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8086 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8087 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8088 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8089 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8090 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8091 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8092 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8093 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8094 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8095 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8096 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8097 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8098 {
8099 unsigned got_type;
8100 unsigned old_got_type;
8101
8102 got_type = aarch64_reloc_got_type (bfd_r_type);
8103
8104 if (h)
8105 {
8106 h->got.refcount += 1;
8107 old_got_type = elf_aarch64_hash_entry (h)->got_type;
8108 }
8109 else
8110 {
8111 struct elf_aarch64_local_symbol *locals;
8112
8113 if (!elfNN_aarch64_allocate_local_symbols
8114 (abfd, symtab_hdr->sh_info))
8115 return false;
8116
8117 locals = elf_aarch64_locals (abfd);
8118 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8119 locals[r_symndx].got_refcount += 1;
8120 old_got_type = locals[r_symndx].got_type;
8121 }
8122
8123 /* If a variable is accessed with both general dynamic TLS
8124 methods, two slots may be created. */
8125 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
8126 got_type |= old_got_type;
8127
8128 /* We will already have issued an error message if there
8129 is a TLS/non-TLS mismatch, based on the symbol type.
8130 So just combine any TLS types needed. */
8131 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
8132 && got_type != GOT_NORMAL)
8133 got_type |= old_got_type;
8134
8135 /* If the symbol is accessed by both IE and GD methods, we
8136 are able to relax. Turn off the GD flag, without
8137 messing up any other TLS types that may be
8138 involved. */
8139 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
8140 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
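/* For example, a symbol referenced through both a TLS descriptor sequence
   and an IE :gottprel: sequence is left with only GOT_TLS_IE set here, so
   the later GOT allocation treats it as a plain IE access. */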
8141
8142 if (old_got_type != got_type)
8143 {
8144 if (h != NULL)
8145 elf_aarch64_hash_entry (h)->got_type = got_type;
8146 else
8147 {
8148 struct elf_aarch64_local_symbol *locals;
8149 locals = elf_aarch64_locals (abfd);
8150 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8151 locals[r_symndx].got_type = got_type;
8152 }
8153 }
8154
8155 if (htab->root.dynobj == NULL)
8156 htab->root.dynobj = abfd;
8157 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
8158 return false;
8159 break;
8160 }
8161
8162 case BFD_RELOC_AARCH64_CALL26:
8163 case BFD_RELOC_AARCH64_JUMP26:
8164 /* If this is a local symbol then we resolve it
8165 directly without creating a PLT entry. */
8166 if (h == NULL)
8167 continue;
8168
8169 h->needs_plt = 1;
8170 if (h->plt.refcount <= 0)
8171 h->plt.refcount = 1;
8172 else
8173 h->plt.refcount += 1;
8174 break;
8175
8176 default:
8177 break;
8178 }
8179 }
8180
8181 return true;
8182 }
8183
8184 /* Treat mapping symbols as special target symbols. */
8185
8186 static bool
8187 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
8188 asymbol *sym)
8189 {
8190 return bfd_is_aarch64_special_symbol_name (sym->name,
8191 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
8192 }
8193
8194 /* If the ELF symbol SYM might be a function in SEC, return the
8195 function size and set *CODE_OFF to the function's entry point,
8196 otherwise return zero. */
8197
8198 static bfd_size_type
8199 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
8200 bfd_vma *code_off)
8201 {
8202 bfd_size_type size;
8203 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
8204
8205 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
8206 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
8207 || sym->section != sec)
8208 return 0;
8209
8210 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
8211
8212 if (!(sym->flags & BSF_SYNTHETIC))
8213 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
8214 {
8215 case STT_NOTYPE:
8216 /* Ignore symbols created by the annobin plugin for gcc and clang.
8217 These symbols are hidden, local, notype and have a size of 0. */
8218 if (size == 0
8219 && sym->flags & BSF_LOCAL
8220 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
8221 return 0;
8222 /* Fall through. */
8223 case STT_FUNC:
8224 /* FIXME: Allow STT_GNU_IFUNC as well ? */
8225 break;
8226 default:
8227 return 0;
8228 }
8229
8230 if ((sym->flags & BSF_LOCAL)
8231 && bfd_is_aarch64_special_symbol_name (sym->name,
8232 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
8233 return 0;
8234
8235 *code_off = sym->value;
8236
8237 /* Do not return 0 for the function's size. */
8238 return size ? size : 1;
8239 }
8240
8241 static bool
8242 elfNN_aarch64_find_inliner_info (bfd *abfd,
8243 const char **filename_ptr,
8244 const char **functionname_ptr,
8245 unsigned int *line_ptr)
8246 {
8247 bool found;
8248 found = _bfd_dwarf2_find_inliner_info
8249 (abfd, filename_ptr,
8250 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
8251 return found;
8252 }
8253
8254
8255 static bool
8256 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
8257 {
8258 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
8259
8260 if (!_bfd_elf_init_file_header (abfd, link_info))
8261 return false;
8262
8263 i_ehdrp = elf_elfheader (abfd);
8264 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
8265 return true;
8266 }
8267
8268 static enum elf_reloc_type_class
8269 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
8270 const asection *rel_sec ATTRIBUTE_UNUSED,
8271 const Elf_Internal_Rela *rela)
8272 {
8273 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
8274
8275 if (htab->root.dynsym != NULL
8276 && htab->root.dynsym->contents != NULL)
8277 {
8278 /* Check relocation against STT_GNU_IFUNC symbol if there are
8279 dynamic symbols. */
8280 bfd *abfd = info->output_bfd;
8281 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
8282 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
8283 if (r_symndx != STN_UNDEF)
8284 {
8285 Elf_Internal_Sym sym;
8286 if (!bed->s->swap_symbol_in (abfd,
8287 (htab->root.dynsym->contents
8288 + r_symndx * bed->s->sizeof_sym),
8289 0, &sym))
8290 {
8291 /* xgettext:c-format */
8292 _bfd_error_handler (_("%pB symbol number %lu references"
8293 " nonexistent SHT_SYMTAB_SHNDX section"),
8294 abfd, r_symndx);
8295 /* Ideally an error class should be returned here. */
8296 }
8297 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
8298 return reloc_class_ifunc;
8299 }
8300 }
8301
8302 switch ((int) ELFNN_R_TYPE (rela->r_info))
8303 {
8304 case AARCH64_R (IRELATIVE):
8305 return reloc_class_ifunc;
8306 case AARCH64_R (RELATIVE):
8307 return reloc_class_relative;
8308 case AARCH64_R (JUMP_SLOT):
8309 return reloc_class_plt;
8310 case AARCH64_R (COPY):
8311 return reloc_class_copy;
8312 default:
8313 return reloc_class_normal;
8314 }
8315 }
8316
8317 /* Handle an AArch64 specific section when reading an object file. This is
8318 called when bfd_section_from_shdr finds a section with an unknown
8319 type. */
8320
8321 static bool
8322 elfNN_aarch64_section_from_shdr (bfd *abfd,
8323 Elf_Internal_Shdr *hdr,
8324 const char *name, int shindex)
8325 {
8326 /* There ought to be a place to keep ELF backend specific flags, but
8327 at the moment there isn't one. We just keep track of the
8328 sections by their name, instead. Fortunately, the ABI gives
8329 names for all the AArch64 specific sections, so we will probably get
8330 away with this. */
8331 switch (hdr->sh_type)
8332 {
8333 case SHT_AARCH64_ATTRIBUTES:
8334 break;
8335
8336 default:
8337 return false;
8338 }
8339
8340 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
8341 return false;
8342
8343 return true;
8344 }
8345
8346 /* Process any AArch64-specific program segment types. */
8347
8348 static bool
8349 elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
8350 Elf_Internal_Phdr *hdr,
8351 int hdr_index ATTRIBUTE_UNUSED,
8352 const char *name ATTRIBUTE_UNUSED)
8353 {
8354 /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type. */
8355 if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
8356 return false;
8357
8358 if (hdr->p_filesz > 0)
8359 {
8360 /* Sections created from memory tag p_type's are always named
8361 "memtag". This makes it easier for tools (for example, GDB)
8362 to find them. */
8363 asection *newsect = bfd_make_section_anyway (abfd, "memtag");
8364
8365 if (newsect == NULL)
8366 return false;
8367
8368 unsigned int opb = bfd_octets_per_byte (abfd, NULL);
8369
8370 /* p_vaddr holds the original start address of the tagged memory
8371 range. */
8372 newsect->vma = hdr->p_vaddr / opb;
8373
8374 /* p_filesz holds the storage size of the packed tags. */
8375 newsect->size = hdr->p_filesz;
8376 newsect->filepos = hdr->p_offset;
8377
8378 /* p_memsz holds the size of the memory range that contains tags. The
8379 section's rawsize field is reused for this purpose. */
8380 newsect->rawsize = hdr->p_memsz;
8381
8382 /* Make sure the section's flags has SEC_HAS_CONTENTS set, otherwise
8383 BFD will return all zeroes when attempting to get contents from this
8384 section. */
8385 newsect->flags |= SEC_HAS_CONTENTS;
8386 }
8387
8388 return true;
8389 }
8390
8391 /* Implements the bfd_elf_modify_headers hook for aarch64. */
8392
8393 static bool
8394 elfNN_aarch64_modify_headers (bfd *abfd,
8395 struct bfd_link_info *info)
8396 {
8397 struct elf_segment_map *m;
8398 unsigned int segment_count = 0;
8399 Elf_Internal_Phdr *p;
8400
8401 for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
8402 {
8403 /* We are only interested in the memory tag segment that will be dumped
8404 to a core file. If we have no memory tags or we are not dealing with a
8405 core file, just skip this segment. */
8406 if (m->p_type != PT_AARCH64_MEMTAG_MTE
8407 || bfd_get_format (abfd) != bfd_core)
8408 continue;
8409
8410 /* For memory tag segments in core files, the size of the file contents
8411 is smaller than the size of the memory range. Adjust the memory size
8412 accordingly. The real memory size is held in the section's rawsize
8413 field. */
8414 if (m->count > 0)
8415 {
8416 p = elf_tdata (abfd)->phdr;
8417 p += m->idx;
8418 p->p_memsz = m->sections[0]->rawsize;
8419 p->p_flags = 0;
8420 p->p_paddr = 0;
8421 p->p_align = 0;
8422 }
8423 }
8424
8425 /* Give the generic code a chance to handle the headers. */
8426 return _bfd_elf_modify_headers (abfd, info);
8427 }
8428
8429 /* A structure used to record a list of sections, independently
8430 of the next and prev fields in the asection structure. */
8431 typedef struct section_list
8432 {
8433 asection *sec;
8434 struct section_list *next;
8435 struct section_list *prev;
8436 }
8437 section_list;
8438
8439 /* Unfortunately we need to keep a list of sections for which
8440 an _aarch64_elf_section_data structure has been allocated. This
8441 is because it is possible for functions like elfNN_aarch64_write_section
8442 to be called on a section which has had an elf_data_structure
8443 allocated for it (and so the used_by_bfd field is valid) but
8444 for which the AArch64 extended version of this structure - the
8445 _aarch64_elf_section_data structure - has not been allocated. */
8446 static section_list *sections_with_aarch64_elf_section_data = NULL;
8447
8448 static void
8449 record_section_with_aarch64_elf_section_data (asection *sec)
8450 {
8451 struct section_list *entry;
8452
8453 entry = bfd_malloc (sizeof (*entry));
8454 if (entry == NULL)
8455 return;
8456 entry->sec = sec;
8457 entry->next = sections_with_aarch64_elf_section_data;
8458 entry->prev = NULL;
8459 if (entry->next != NULL)
8460 entry->next->prev = entry;
8461 sections_with_aarch64_elf_section_data = entry;
8462 }
8463
8464 static struct section_list *
8465 find_aarch64_elf_section_entry (asection *sec)
8466 {
8467 struct section_list *entry;
8468 static struct section_list *last_entry = NULL;
8469
8470 /* This is a short cut for the typical case where the sections are added
8471 to the sections_with_aarch64_elf_section_data list in forward order and
8472 then looked up here in backwards order. This makes a real difference
8473 to the ld-srec/sec64k.exp linker test. */
8474 entry = sections_with_aarch64_elf_section_data;
8475 if (last_entry != NULL)
8476 {
8477 if (last_entry->sec == sec)
8478 entry = last_entry;
8479 else if (last_entry->next != NULL && last_entry->next->sec == sec)
8480 entry = last_entry->next;
8481 }
8482
8483 for (; entry; entry = entry->next)
8484 if (entry->sec == sec)
8485 break;
8486
8487 if (entry)
8488 /* Record the entry prior to this one - it is the entry we are
8489 most likely to want to locate next time. Also this way if we
8490 have been called from
8491 unrecord_section_with_aarch64_elf_section_data () we will not
8492 be caching a pointer that is about to be freed. */
8493 last_entry = entry->prev;
8494
8495 return entry;
8496 }
8497
8498 static void
8499 unrecord_section_with_aarch64_elf_section_data (asection *sec)
8500 {
8501 struct section_list *entry;
8502
8503 entry = find_aarch64_elf_section_entry (sec);
8504
8505 if (entry)
8506 {
8507 if (entry->prev != NULL)
8508 entry->prev->next = entry->next;
8509 if (entry->next != NULL)
8510 entry->next->prev = entry->prev;
8511 if (entry == sections_with_aarch64_elf_section_data)
8512 sections_with_aarch64_elf_section_data = entry->next;
8513 free (entry);
8514 }
8515 }
8516
8517
8518 typedef struct
8519 {
8520 void *finfo;
8521 struct bfd_link_info *info;
8522 asection *sec;
8523 int sec_shndx;
8524 int (*func) (void *, const char *, Elf_Internal_Sym *,
8525 asection *, struct elf_link_hash_entry *);
8526 } output_arch_syminfo;
8527
8528 enum map_symbol_type
8529 {
8530 AARCH64_MAP_INSN,
8531 AARCH64_MAP_DATA
8532 };
8533
8534
8535 /* Output a single mapping symbol. */
8536
8537 static bool
8538 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
8539 enum map_symbol_type type, bfd_vma offset)
8540 {
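/* Per the AArch64 ELF ABI, the mapping symbol "$x" marks the start of a
   sequence of A64 instructions and "$d" marks the start of data, so
   disassemblers can tell code from embedded literals. */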
8541 static const char *names[2] = { "$x", "$d" };
8542 Elf_Internal_Sym sym;
8543
8544 sym.st_value = (osi->sec->output_section->vma
8545 + osi->sec->output_offset + offset);
8546 sym.st_size = 0;
8547 sym.st_other = 0;
8548 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
8549 sym.st_shndx = osi->sec_shndx;
8550 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
8551 }
8552
8553 /* Output a single local symbol for a generated stub. */
8554
8555 static bool
8556 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
8557 bfd_vma offset, bfd_vma size)
8558 {
8559 Elf_Internal_Sym sym;
8560
8561 sym.st_value = (osi->sec->output_section->vma
8562 + osi->sec->output_offset + offset);
8563 sym.st_size = size;
8564 sym.st_other = 0;
8565 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
8566 sym.st_shndx = osi->sec_shndx;
8567 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
8568 }
8569
8570 static bool
8571 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
8572 {
8573 struct elf_aarch64_stub_hash_entry *stub_entry;
8574 asection *stub_sec;
8575 bfd_vma addr;
8576 char *stub_name;
8577 output_arch_syminfo *osi;
8578
8579 /* Massage our args to the form they really have. */
8580 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
8581 osi = (output_arch_syminfo *) in_arg;
8582
8583 stub_sec = stub_entry->stub_sec;
8584
8585 /* Ensure this stub is attached to the current section being
8586 processed. */
8587 if (stub_sec != osi->sec)
8588 return true;
8589
8590 addr = (bfd_vma) stub_entry->stub_offset;
8591
8592 stub_name = stub_entry->output_name;
8593
8594 switch (stub_entry->stub_type)
8595 {
8596 case aarch64_stub_adrp_branch:
8597 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8598 sizeof (aarch64_adrp_branch_stub)))
8599 return false;
8600 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8601 return false;
8602 break;
8603 case aarch64_stub_long_branch:
8604 if (!elfNN_aarch64_output_stub_sym
8605 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
8606 return false;
8607 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8608 return false;
8609 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
8610 return false;
8611 break;
8612 case aarch64_stub_bti_direct_branch:
8613 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8614 sizeof (aarch64_bti_direct_branch_stub)))
8615 return false;
8616 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8617 return false;
8618 break;
8619 case aarch64_stub_erratum_835769_veneer:
8620 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8621 sizeof (aarch64_erratum_835769_stub)))
8622 return false;
8623 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8624 return false;
8625 break;
8626 case aarch64_stub_erratum_843419_veneer:
8627 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8628 sizeof (aarch64_erratum_843419_stub)))
8629 return false;
8630 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8631 return false;
8632 break;
8633 case aarch64_stub_none:
8634 break;
8635
8636 default:
8637 abort ();
8638 }
8639
8640 return true;
8641 }
8642
8643 /* Output mapping symbols for linker generated sections. */
8644
8645 static bool
8646 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
8647 struct bfd_link_info *info,
8648 void *finfo,
8649 int (*func) (void *, const char *,
8650 Elf_Internal_Sym *,
8651 asection *,
8652 struct elf_link_hash_entry
8653 *))
8654 {
8655 output_arch_syminfo osi;
8656 struct elf_aarch64_link_hash_table *htab;
8657
8658 if (info->strip == strip_all
8659 && !info->emitrelocations
8660 && !bfd_link_relocatable (info))
8661 return true;
8662
8663 htab = elf_aarch64_hash_table (info);
8664
8665 osi.finfo = finfo;
8666 osi.info = info;
8667 osi.func = func;
8668
8669 /* Long call stubs. */
8670 if (htab->stub_bfd && htab->stub_bfd->sections)
8671 {
8672 asection *stub_sec;
8673
8674 for (stub_sec = htab->stub_bfd->sections;
8675 stub_sec != NULL; stub_sec = stub_sec->next)
8676 {
8677 /* Ignore non-stub sections. */
8678 if (!strstr (stub_sec->name, STUB_SUFFIX))
8679 continue;
8680
8681 osi.sec = stub_sec;
8682
8683 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8684 (output_bfd, osi.sec->output_section);
8685
8686 /* The first instruction in a stub is always a branch. */
8687 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
8688 return false;
8689
8690 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
8691 &osi);
8692 }
8693 }
8694
8695 /* Finally, output mapping symbols for the PLT. */
8696 if (!htab->root.splt || htab->root.splt->size == 0)
8697 return true;
8698
8699 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8700 (output_bfd, htab->root.splt->output_section);
8701 osi.sec = htab->root.splt;
8702
8703 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
8704
8705 return true;
8706
8707 }
8708
8709 /* Allocate target specific section data. */
8710
8711 static bool
8712 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
8713 {
8714 if (!sec->used_by_bfd)
8715 {
8716 _aarch64_elf_section_data *sdata;
8717 size_t amt = sizeof (*sdata);
8718
8719 sdata = bfd_zalloc (abfd, amt);
8720 if (sdata == NULL)
8721 return false;
8722 sec->used_by_bfd = sdata;
8723 }
8724
8725 record_section_with_aarch64_elf_section_data (sec);
8726
8727 return _bfd_elf_new_section_hook (abfd, sec);
8728 }
8729
8730
8731 static void
8732 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
8733 asection *sec,
8734 void *ignore ATTRIBUTE_UNUSED)
8735 {
8736 unrecord_section_with_aarch64_elf_section_data (sec);
8737 }
8738
8739 static bool
8740 elfNN_aarch64_close_and_cleanup (bfd *abfd)
8741 {
8742 if (abfd->sections)
8743 bfd_map_over_sections (abfd,
8744 unrecord_section_via_map_over_sections, NULL);
8745
8746 return _bfd_elf_close_and_cleanup (abfd);
8747 }
8748
8749 static bool
8750 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
8751 {
8752 if (abfd->sections)
8753 bfd_map_over_sections (abfd,
8754 unrecord_section_via_map_over_sections, NULL);
8755
8756 return _bfd_free_cached_info (abfd);
8757 }
8758
8759 /* Create dynamic sections. This is different from the ARM backend in that
8760 the got, plt, gotplt and their relocation sections are all created in the
8761 standard part of the bfd elf backend. */
8762
8763 static bool
8764 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
8765 struct bfd_link_info *info)
8766 {
8767 /* We need to create .got section. */
8768 if (!aarch64_elf_create_got_section (dynobj, info))
8769 return false;
8770
8771 return _bfd_elf_create_dynamic_sections (dynobj, info);
8772 }
8773
8774
8775 /* Allocate space in .plt, .got and associated reloc sections for
8776 dynamic relocs. */
8777
8778 static bool
8779 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8780 {
8781 struct bfd_link_info *info;
8782 struct elf_aarch64_link_hash_table *htab;
8783 struct elf_aarch64_link_hash_entry *eh;
8784 struct elf_dyn_relocs *p;
8785
8786 /* An example of a bfd_link_hash_indirect symbol is a versioned
8787 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8788 -> __gxx_personality_v0(bfd_link_hash_defined)
8789
8790 There is no need to process bfd_link_hash_indirect symbols here
8791 because we will also be presented with the concrete instance of
8792 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8793 called to copy all relevant data from the generic to the concrete
8794 symbol instance. */
8795 if (h->root.type == bfd_link_hash_indirect)
8796 return true;
8797
8798 if (h->root.type == bfd_link_hash_warning)
8799 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8800
8801 info = (struct bfd_link_info *) inf;
8802 htab = elf_aarch64_hash_table (info);
8803
8804 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
8805 here if it is defined and referenced in a non-shared object. */
8806 if (h->type == STT_GNU_IFUNC
8807 && h->def_regular)
8808 return true;
8809 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8810 {
8811 /* Make sure this symbol is output as a dynamic symbol.
8812 Undefined weak syms won't yet be marked as dynamic. */
8813 if (h->dynindx == -1 && !h->forced_local
8814 && h->root.type == bfd_link_hash_undefweak)
8815 {
8816 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8817 return false;
8818 }
8819
8820 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8821 {
8822 asection *s = htab->root.splt;
8823
8824 /* If this is the first .plt entry, make room for the special
8825 first entry. */
8826 if (s->size == 0)
8827 s->size += htab->plt_header_size;
8828
8829 h->plt.offset = s->size;
8830
8831 /* If this symbol is not defined in a regular file, and we are
8832 not generating a shared library, then set the symbol to this
8833 location in the .plt. This is required to make function
8834 pointers compare as equal between the normal executable and
8835 the shared library. */
8836 if (!bfd_link_pic (info) && !h->def_regular)
8837 {
8838 h->root.u.def.section = s;
8839 h->root.u.def.value = h->plt.offset;
8840 }
8841
8842 /* Make room for this entry. For now we only create the
8843 small model PLT entries. We later need to find a way
8844 of relaxing into these from the large model PLT entries. */
8845 s->size += htab->plt_entry_size;
8846
8847 /* We also need to make an entry in the .got.plt section, which
8848 will be placed in the .got section by the linker script. */
8849 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8850
8851 /* We also need to make an entry in the .rela.plt section. */
8852 htab->root.srelplt->size += RELOC_SIZE (htab);
8853
8854 /* We need to ensure that all GOT entries that serve the PLT
8855 are consecutive with the special GOT slots [0] [1] and
8856 [2]. Any additional relocations, such as
8857 R_AARCH64_TLSDESC, must be placed after the PLT related
8858 entries. We abuse the reloc_count such that during
8859 sizing we adjust reloc_count to indicate the number of
8860 PLT related reserved entries. In subsequent phases when
8861 filling in the contents of the reloc entries, PLT related
8862 entries are placed by computing their PLT index (0
8863 .. reloc_count), while other non-PLT relocs are placed
8864 at the slot indicated by reloc_count, and reloc_count is
8865 updated. */
8866
8867 htab->root.srelplt->reloc_count++;
8868
8869 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
8870 variant PCS symbols are present. */
8871 if (h->other & STO_AARCH64_VARIANT_PCS)
8872 htab->variant_pcs = 1;
8873
8874 }
8875 else
8876 {
8877 h->plt.offset = (bfd_vma) - 1;
8878 h->needs_plt = 0;
8879 }
8880 }
8881 else
8882 {
8883 h->plt.offset = (bfd_vma) - 1;
8884 h->needs_plt = 0;
8885 }
8886
8887 eh = (struct elf_aarch64_link_hash_entry *) h;
8888 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8889
8890 if (h->got.refcount > 0)
8891 {
8892 bool dyn;
8893 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8894
8895 h->got.offset = (bfd_vma) - 1;
8896
8897 dyn = htab->root.dynamic_sections_created;
8898
8899 /* Make sure this symbol is output as a dynamic symbol.
8900 Undefined weak syms won't yet be marked as dynamic. */
8901 if (dyn && h->dynindx == -1 && !h->forced_local
8902 && h->root.type == bfd_link_hash_undefweak)
8903 {
8904 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8905 return false;
8906 }
8907
8908 if (got_type == GOT_UNKNOWN)
8909 {
8910 }
8911 else if (got_type == GOT_NORMAL)
8912 {
8913 h->got.offset = htab->root.sgot->size;
8914 htab->root.sgot->size += GOT_ENTRY_SIZE;
8915 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8916 || h->root.type != bfd_link_hash_undefweak)
8917 && (bfd_link_pic (info)
8918 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
8919 /* Undefined weak symbol in static PIE resolves to 0 without
8920 any dynamic relocations. */
8921 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8922 {
8923 htab->root.srelgot->size += RELOC_SIZE (htab);
8924 }
8925 }
8926 else
8927 {
8928 int indx;
8929 if (got_type & GOT_TLSDESC_GD)
8930 {
8931 eh->tlsdesc_got_jump_table_offset =
8932 (htab->root.sgotplt->size
8933 - aarch64_compute_jump_table_size (htab));
8934 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8935 h->got.offset = (bfd_vma) - 2;
8936 }
8937
8938 if (got_type & GOT_TLS_GD)
8939 {
8940 h->got.offset = htab->root.sgot->size;
8941 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8942 }
8943
8944 if (got_type & GOT_TLS_IE)
8945 {
8946 h->got.offset = htab->root.sgot->size;
8947 htab->root.sgot->size += GOT_ENTRY_SIZE;
8948 }
8949
8950 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8951 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8952 || h->root.type != bfd_link_hash_undefweak)
8953 && (!bfd_link_executable (info)
8954 || indx != 0
8955 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8956 {
8957 if (got_type & GOT_TLSDESC_GD)
8958 {
8959 htab->root.srelplt->size += RELOC_SIZE (htab);
8960 /* Note reloc_count not incremented here! We have
8961 already adjusted reloc_count for this relocation
8962 type. */
8963
8964 /* TLSDESC PLT is now needed, but not yet determined. */
8965 htab->root.tlsdesc_plt = (bfd_vma) - 1;
8966 }
8967
8968 if (got_type & GOT_TLS_GD)
8969 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8970
8971 if (got_type & GOT_TLS_IE)
8972 htab->root.srelgot->size += RELOC_SIZE (htab);
8973 }
8974 }
8975 }
8976 else
8977 {
8978 h->got.offset = (bfd_vma) - 1;
8979 }
8980
8981 if (h->dyn_relocs == NULL)
8982 return true;
8983
8984 for (p = h->dyn_relocs; p != NULL; p = p->next)
8985 if (eh->def_protected)
8986 {
8987 /* Disallow copy relocations against protected symbols. */
8988 asection *s = p->sec->output_section;
8989 if (s != NULL && (s->flags & SEC_READONLY) != 0)
8990 {
8991 info->callbacks->einfo
8992 /* xgettext:c-format */
8993 (_ ("%F%P: %pB: copy relocation against non-copyable "
8994 "protected symbol `%s'\n"),
8995 p->sec->owner, h->root.root.string);
8996 return false;
8997 }
8998 }
8999
9000 /* In the shared -Bsymbolic case, discard space allocated for
9001 dynamic pc-relative relocs against symbols which turn out to be
9002 defined in regular objects. For the normal shared case, discard
9003 space for pc-relative relocs that have become local due to symbol
9004 visibility changes. */
9005
9006 if (bfd_link_pic (info))
9007 {
9008 /* Relocs that use pc_count are those that appear on a call
9009 insn, or certain REL relocs that can be generated via assembly.
9010 We want calls to protected symbols to resolve directly to the
9011 function rather than going via the plt. If people want
9012 function pointer comparisons to work as expected then they
9013 should avoid writing weird assembly. */
9014 if (SYMBOL_CALLS_LOCAL (info, h))
9015 {
9016 struct elf_dyn_relocs **pp;
9017
9018 for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
9019 {
9020 p->count -= p->pc_count;
9021 p->pc_count = 0;
9022 if (p->count == 0)
9023 *pp = p->next;
9024 else
9025 pp = &p->next;
9026 }
9027 }
9028
9029 /* Also discard relocs on undefined weak syms with non-default
9030 visibility. */
9031 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
9032 {
9033 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
9034 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9035 h->dyn_relocs = NULL;
9036
9037 /* Make sure undefined weak symbols are output as dynamic
9038 symbols in PIEs. */
9039 else if (h->dynindx == -1
9040 && !h->forced_local
9041 && h->root.type == bfd_link_hash_undefweak
9042 && !bfd_elf_link_record_dynamic_symbol (info, h))
9043 return false;
9044 }
9045
9046 }
9047 else if (ELIMINATE_COPY_RELOCS)
9048 {
9049 /* For the non-shared case, discard space for relocs against
9050 symbols which turn out to need copy relocs or are not
9051 dynamic. */
9052
9053 if (!h->non_got_ref
9054 && ((h->def_dynamic
9055 && !h->def_regular)
9056 || (htab->root.dynamic_sections_created
9057 && (h->root.type == bfd_link_hash_undefweak
9058 || h->root.type == bfd_link_hash_undefined))))
9059 {
9060 /* Make sure this symbol is output as a dynamic symbol.
9061 Undefined weak syms won't yet be marked as dynamic. */
9062 if (h->dynindx == -1
9063 && !h->forced_local
9064 && h->root.type == bfd_link_hash_undefweak
9065 && !bfd_elf_link_record_dynamic_symbol (info, h))
9066 return false;
9067
9068 /* If that succeeded, we know we'll be keeping all the
9069 relocs. */
9070 if (h->dynindx != -1)
9071 goto keep;
9072 }
9073
9074 h->dyn_relocs = NULL;
9075
9076 keep:;
9077 }
9078
9079 /* Finally, allocate space. */
9080 for (p = h->dyn_relocs; p != NULL; p = p->next)
9081 {
9082 asection *sreloc;
9083
9084 sreloc = elf_section_data (p->sec)->sreloc;
9085
9086 BFD_ASSERT (sreloc != NULL);
9087
9088 sreloc->size += p->count * RELOC_SIZE (htab);
9089 }
9090
9091 return true;
9092 }
9093
9094 /* Allocate space in .plt, .got and associated reloc sections for
9095 ifunc dynamic relocs. */
9096
9097 static bool
9098 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
9099 void *inf)
9100 {
9101 struct bfd_link_info *info;
9102 struct elf_aarch64_link_hash_table *htab;
9103
9104 /* An example of a bfd_link_hash_indirect symbol is a versioned
9105 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
9106 -> __gxx_personality_v0(bfd_link_hash_defined)
9107
9108 There is no need to process bfd_link_hash_indirect symbols here
9109 because we will also be presented with the concrete instance of
9110 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
9111 called to copy all relevant data from the generic to the concrete
9112 symbol instance. */
9113 if (h->root.type == bfd_link_hash_indirect)
9114 return true;
9115
9116 if (h->root.type == bfd_link_hash_warning)
9117 h = (struct elf_link_hash_entry *) h->root.u.i.link;
9118
9119 info = (struct bfd_link_info *) inf;
9120 htab = elf_aarch64_hash_table (info);
9121
9122 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
9123 here if it is defined and referenced in a non-shared object. */
9124 if (h->type == STT_GNU_IFUNC
9125 && h->def_regular)
9126 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
9127 &h->dyn_relocs,
9128 htab->plt_entry_size,
9129 htab->plt_header_size,
9130 GOT_ENTRY_SIZE,
9131 false);
9132 return true;
9133 }
9134
9135 /* Allocate space in .plt, .got and associated reloc sections for
9136 local ifunc dynamic relocs. */
9137
9138 static int
9139 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
9140 {
9141 struct elf_link_hash_entry *h
9142 = (struct elf_link_hash_entry *) *slot;
9143
9144 if (h->type != STT_GNU_IFUNC
9145 || !h->def_regular
9146 || !h->ref_regular
9147 || !h->forced_local
9148 || h->root.type != bfd_link_hash_defined)
9149 abort ();
9150
9151 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
9152 }
9153
9154 /* This is the most important function of all. Innocuously named
9155 though! */
9156
9157 static bool
9158 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
9159 struct bfd_link_info *info)
9160 {
9161 struct elf_aarch64_link_hash_table *htab;
9162 bfd *dynobj;
9163 asection *s;
9164 bool relocs;
9165 bfd *ibfd;
9166
9167 htab = elf_aarch64_hash_table ((info));
9168 dynobj = htab->root.dynobj;
9169
9170 BFD_ASSERT (dynobj != NULL);
9171
9172 if (htab->root.dynamic_sections_created)
9173 {
9174 if (bfd_link_executable (info) && !info->nointerp)
9175 {
9176 s = bfd_get_linker_section (dynobj, ".interp");
9177 if (s == NULL)
9178 abort ();
9179 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
9180 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
9181 }
9182 }
9183
9184 /* Set up .got offsets for local syms, and space for local dynamic
9185 relocs. */
9186 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9187 {
9188 struct elf_aarch64_local_symbol *locals = NULL;
9189 Elf_Internal_Shdr *symtab_hdr;
9190 asection *srel;
9191 unsigned int i;
9192
9193 if (!is_aarch64_elf (ibfd))
9194 continue;
9195
9196 for (s = ibfd->sections; s != NULL; s = s->next)
9197 {
9198 struct elf_dyn_relocs *p;
9199
9200 for (p = (struct elf_dyn_relocs *)
9201 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
9202 {
9203 if (!bfd_is_abs_section (p->sec)
9204 && bfd_is_abs_section (p->sec->output_section))
9205 {
9206 /* Input section has been discarded, either because
9207 it is a copy of a linkonce section or due to
9208 linker script /DISCARD/, so we'll be discarding
9209 the relocs too. */
9210 }
9211 else if (p->count != 0)
9212 {
9213 srel = elf_section_data (p->sec)->sreloc;
9214 srel->size += p->count * RELOC_SIZE (htab);
9215 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
9216 info->flags |= DF_TEXTREL;
9217 }
9218 }
9219 }
9220
9221 locals = elf_aarch64_locals (ibfd);
9222 if (!locals)
9223 continue;
9224
9225 symtab_hdr = &elf_symtab_hdr (ibfd);
9226 srel = htab->root.srelgot;
9227 for (i = 0; i < symtab_hdr->sh_info; i++)
9228 {
9229 locals[i].got_offset = (bfd_vma) - 1;
9230 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9231 if (locals[i].got_refcount > 0)
9232 {
9233 unsigned got_type = locals[i].got_type;
9234 if (got_type & GOT_TLSDESC_GD)
9235 {
9236 locals[i].tlsdesc_got_jump_table_offset =
9237 (htab->root.sgotplt->size
9238 - aarch64_compute_jump_table_size (htab));
9239 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9240 locals[i].got_offset = (bfd_vma) - 2;
9241 }
9242
9243 if (got_type & GOT_TLS_GD)
9244 {
9245 locals[i].got_offset = htab->root.sgot->size;
9246 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9247 }
9248
9249 if (got_type & GOT_TLS_IE
9250 || got_type & GOT_NORMAL)
9251 {
9252 locals[i].got_offset = htab->root.sgot->size;
9253 htab->root.sgot->size += GOT_ENTRY_SIZE;
9254 }
9255
9256 if (got_type == GOT_UNKNOWN)
9257 {
9258 }
9259
9260 if (bfd_link_pic (info))
9261 {
9262 if (got_type & GOT_TLSDESC_GD)
9263 {
9264 htab->root.srelplt->size += RELOC_SIZE (htab);
9265 /* Note RELOC_COUNT not incremented here! */
9266 htab->root.tlsdesc_plt = (bfd_vma) - 1;
9267 }
9268
9269 if (got_type & GOT_TLS_GD)
9270 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9271
9272 if (got_type & GOT_TLS_IE
9273 || got_type & GOT_NORMAL)
9274 htab->root.srelgot->size += RELOC_SIZE (htab);
9275 }
9276 }
9277 else
9278 {
9279 locals[i].got_refcount = (bfd_vma) - 1;
9280 }
9281 }
9282 }
9283
9284
9285 /* Allocate global sym .plt and .got entries, and space for global
9286 sym dynamic relocs. */
9287 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9288 info);
9289
9290 /* Allocate global ifunc sym .plt and .got entries, and space for global
9291 ifunc sym dynamic relocs. */
9292 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9293 info);
9294
9295 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9296 htab_traverse (htab->loc_hash_table,
9297 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9298 info);
9299
9300 /* For every jump slot reserved in the sgotplt, reloc_count is
9301 incremented. However, when we reserve space for TLS descriptors,
9302 it's not incremented, so in order to compute the space reserved
9303 for them, it suffices to multiply the reloc count by the jump
9304 slot size. */
9305
9306 if (htab->root.srelplt)
9307 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
9308
9309 if (htab->root.tlsdesc_plt)
9310 {
9311 if (htab->root.splt->size == 0)
9312 htab->root.splt->size += htab->plt_header_size;
9313
9314 /* If we're not using lazy TLS relocations, don't generate the
9315 GOT and PLT entries required. */
9316 if ((info->flags & DF_BIND_NOW))
9317 htab->root.tlsdesc_plt = 0;
9318 else
9319 {
9320 htab->root.tlsdesc_plt = htab->root.splt->size;
9321 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9322
9323 htab->root.tlsdesc_got = htab->root.sgot->size;
9324 htab->root.sgot->size += GOT_ENTRY_SIZE;
9325 }
9326 }
9327
9328 /* Init mapping symbol information to use later to distinguish between
9329 code and data while scanning for errata. */
9330 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9331 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9332 {
9333 if (!is_aarch64_elf (ibfd))
9334 continue;
9335 bfd_elfNN_aarch64_init_maps (ibfd);
9336 }
9337
9338 /* We have now determined the sizes of the various dynamic sections.
9339 Allocate memory for them. */
9340 relocs = false;
9341 for (s = dynobj->sections; s != NULL; s = s->next)
9342 {
9343 if ((s->flags & SEC_LINKER_CREATED) == 0)
9344 continue;
9345
9346 if (s == htab->root.splt
9347 || s == htab->root.sgot
9348 || s == htab->root.sgotplt
9349 || s == htab->root.iplt
9350 || s == htab->root.igotplt
9351 || s == htab->root.sdynbss
9352 || s == htab->root.sdynrelro)
9353 {
9354 /* Strip this section if we don't need it; see the
9355 comment below. */
9356 }
9357 else if (startswith (bfd_section_name (s), ".rela"))
9358 {
9359 if (s->size != 0 && s != htab->root.srelplt)
9360 relocs = true;
9361
9362 /* We use the reloc_count field as a counter if we need
9363 to copy relocs into the output file. */
9364 if (s != htab->root.srelplt)
9365 s->reloc_count = 0;
9366 }
9367 else
9368 {
9369 /* It's not one of our sections, so don't allocate space. */
9370 continue;
9371 }
9372
9373 if (s->size == 0)
9374 {
9375 /* If we don't need this section, strip it from the
9376 output file. This is mostly to handle .rela.bss and
9377 .rela.plt. We must create both sections in
9378 create_dynamic_sections, because they must be created
9379 before the linker maps input sections to output
9380 sections. The linker does that before
9381 adjust_dynamic_symbol is called, and it is that
9382 function which decides whether anything needs to go
9383 into these sections. */
9384 s->flags |= SEC_EXCLUDE;
9385 continue;
9386 }
9387
9388 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9389 continue;
9390
9391 /* Allocate memory for the section contents. We use bfd_zalloc
9392 here in case unused entries are not reclaimed before the
9393 section's contents are written out. This should not happen,
9394 but this way if it does, we get a R_AARCH64_NONE reloc instead
9395 of garbage. */
9396 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9397 if (s->contents == NULL)
9398 return false;
9399 }
9400
9401 if (htab->root.dynamic_sections_created)
9402 {
9403 /* Add some entries to the .dynamic section. We fill in the
9404 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9405 must add the entries now so that we get the correct size for
9406 the .dynamic section. The DT_DEBUG entry is filled in by the
9407 dynamic linker and used by the debugger. */
9408 #define add_dynamic_entry(TAG, VAL) \
9409 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9410
9411 if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
9412 return false;
9413
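/* Editorial note on the tags added below: the PLT flavour is recorded in
   processor-specific dynamic tags -- the presence of variant PCS symbols,
   and whether the PLT stubs carry BTI and/or PAC protection.  get_plt_type
   further down reads the BTI/PAC tags back when synthesizing PLT symbols
   for disassembly.  */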
9414 if (htab->root.splt->size != 0)
9415 {
9416 if (htab->variant_pcs
9417 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9418 return false;
9419
9420 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9421 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9422 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9423 return false;
9424
9425 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9426 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9427 return false;
9428
9429 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9430 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9431 return false;
9432 }
9433 }
9434 #undef add_dynamic_entry
9435
9436 return true;
9437 }
9438
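/* Editorial summary of the helper below: it patches the instruction at
   PLT_ENTRY so that its immediate field encodes VALUE according to
   relocation type R_TYPE (for example the 21-bit ADRP page offset, or a
   12-bit LDST/ADD low offset).  */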
9439 static inline void
9440 elf_aarch64_update_plt_entry (bfd *output_bfd,
9441 bfd_reloc_code_real_type r_type,
9442 bfd_byte *plt_entry, bfd_vma value)
9443 {
9444 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9445
9446 /* FIXME: We should check the return value from this function call. */
9447 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9448 }
9449
9450 static void
9451 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9452 struct elf_aarch64_link_hash_table
9453 *htab, bfd *output_bfd,
9454 struct bfd_link_info *info)
9455 {
9456 bfd_byte *plt_entry;
9457 bfd_vma plt_index;
9458 bfd_vma got_offset;
9459 bfd_vma gotplt_entry_address;
9460 bfd_vma plt_entry_address;
9461 Elf_Internal_Rela rela;
9462 bfd_byte *loc;
9463 asection *plt, *gotplt, *relplt;
9464
9465 /* When building a static executable, use .iplt, .igot.plt and
9466 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9467 if (htab->root.splt != NULL)
9468 {
9469 plt = htab->root.splt;
9470 gotplt = htab->root.sgotplt;
9471 relplt = htab->root.srelplt;
9472 }
9473 else
9474 {
9475 plt = htab->root.iplt;
9476 gotplt = htab->root.igotplt;
9477 relplt = htab->root.irelplt;
9478 }
9479
9480 /* Get the index in the procedure linkage table which
9481 corresponds to this symbol. This is the index of this symbol
9482 in all the symbols for which we are making plt entries. The
9483 first entry in the procedure linkage table is reserved.
9484
9485 Get the offset into the .got table of the entry that
9486 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9487 bytes. The first three are reserved for the dynamic linker.
9488
9489 For static executables, we don't reserve anything. */
9490
9491 if (plt == htab->root.splt)
9492 {
9493 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9494 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9495 }
9496 else
9497 {
9498 plt_index = h->plt.offset / htab->plt_entry_size;
9499 got_offset = plt_index * GOT_ENTRY_SIZE;
9500 }
9501
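/* A concrete illustration (an editorial note, assuming the default ELF64
   layout where GOT_ENTRY_SIZE is 8): the first PLTn entry sits at
   plt_header_size, giving plt_index 0 and got_offset (0 + 3) * 8 == 24,
   i.e. the first .got.plt slot after the three reserved ones.  */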
9502 plt_entry = plt->contents + h->plt.offset;
9503 plt_entry_address = plt->output_section->vma
9504 + plt->output_offset + h->plt.offset;
9505 gotplt_entry_address = gotplt->output_section->vma +
9506 gotplt->output_offset + got_offset;
9507
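/* In outline, the ELF64 small model PLTn template copied below is:
     adrp x16, PLT_GOT + n * 8
     ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
     add  x16, x16, #:lo12:PLT_GOT + n * 8
     br   x17
   (BTI enabled variants prepend a BTI landing pad.)  The ADRP, LDR and
   ADD immediates are then patched from gotplt_entry_address.  */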
9508 /* Copy in the boilerplate for the PLTn entry. */
9509 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
9510
9511 /* First instruction in BTI enabled PLT stub is a BTI
9512 instruction so skip it. */
9513 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9514 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9515 plt_entry = plt_entry + 4;
9516
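/* Reminder (editorial): PG and PG_OFFSET, defined earlier in this file,
   give the 4 KiB page base (low 12 bits cleared) and the low 12 bits of
   an address; they split an address the same way an ADRP plus LDR/ADD
   instruction pair does.  */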
9517 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9518 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9519 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9520 plt_entry,
9521 PG (gotplt_entry_address) -
9522 PG (plt_entry_address));
9523
9524 /* Fill in the lo12 bits for the load from the pltgot. */
9525 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9526 plt_entry + 4,
9527 PG_OFFSET (gotplt_entry_address));
9528
9529 /* Fill in the lo12 bits for the add from the pltgot entry. */
9530 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9531 plt_entry + 8,
9532 PG_OFFSET (gotplt_entry_address));
9533
9534 /* All the GOTPLT entries are essentially initialized to PLT0. */
9535 bfd_put_NN (output_bfd,
9536 plt->output_section->vma + plt->output_offset,
9537 gotplt->contents + got_offset);
9538
9539 rela.r_offset = gotplt_entry_address;
9540
9541 if (h->dynindx == -1
9542 || ((bfd_link_executable (info)
9543 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9544 && h->def_regular
9545 && h->type == STT_GNU_IFUNC))
9546 {
9547 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9548 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9549 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9550 rela.r_addend = (h->root.u.def.value
9551 + h->root.u.def.section->output_section->vma
9552 + h->root.u.def.section->output_offset);
9553 }
9554 else
9555 {
9556 /* Fill in the entry in the .rela.plt section. */
9557 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9558 rela.r_addend = 0;
9559 }
9560
9561 /* Compute the relocation entry to use based on the PLT index and do
9562 not adjust reloc_count. The reloc_count has already been adjusted
9563 to account for this entry. */
9564 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9565 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9566 }
9567
9568 /* Size sections even though they're not dynamic. We use it to set up
9569 _TLS_MODULE_BASE_, if needed. */
9570
9571 static bool
9572 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9573 struct bfd_link_info *info)
9574 {
9575 asection *tls_sec;
9576
9577 if (bfd_link_relocatable (info))
9578 return true;
9579
9580 tls_sec = elf_hash_table (info)->tls_sec;
9581
9582 if (tls_sec)
9583 {
9584 struct elf_link_hash_entry *tlsbase;
9585
9586 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9587 "_TLS_MODULE_BASE_", true, true, false);
9588
9589 if (tlsbase)
9590 {
9591 struct bfd_link_hash_entry *h = NULL;
9592 const struct elf_backend_data *bed =
9593 get_elf_backend_data (output_bfd);
9594
9595 if (!(_bfd_generic_link_add_one_symbol
9596 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9597 tls_sec, 0, NULL, false, bed->collect, &h)))
9598 return false;
9599
9600 tlsbase->type = STT_TLS;
9601 tlsbase = (struct elf_link_hash_entry *) h;
9602 tlsbase->def_regular = 1;
9603 tlsbase->other = STV_HIDDEN;
9604 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9605 }
9606 }
9607
9608 return true;
9609 }
9610
9611 /* Finish up dynamic symbol handling. We set the contents of various
9612 dynamic sections here. */
9613
9614 static bool
9615 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9616 struct bfd_link_info *info,
9617 struct elf_link_hash_entry *h,
9618 Elf_Internal_Sym *sym)
9619 {
9620 struct elf_aarch64_link_hash_table *htab;
9621 htab = elf_aarch64_hash_table (info);
9622
9623 if (h->plt.offset != (bfd_vma) - 1)
9624 {
9625 asection *plt, *gotplt, *relplt;
9626
9627 /* This symbol has an entry in the procedure linkage table. Set
9628 it up. */
9629
9630 /* When building a static executable, use .iplt, .igot.plt and
9631 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9632 if (htab->root.splt != NULL)
9633 {
9634 plt = htab->root.splt;
9635 gotplt = htab->root.sgotplt;
9636 relplt = htab->root.srelplt;
9637 }
9638 else
9639 {
9640 plt = htab->root.iplt;
9641 gotplt = htab->root.igotplt;
9642 relplt = htab->root.irelplt;
9643 }
9644
9645 /* This symbol has an entry in the procedure linkage table. Set
9646 it up. */
9647 if ((h->dynindx == -1
9648 && !((h->forced_local || bfd_link_executable (info))
9649 && h->def_regular
9650 && h->type == STT_GNU_IFUNC))
9651 || plt == NULL
9652 || gotplt == NULL
9653 || relplt == NULL)
9654 return false;
9655
9656 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9657 if (!h->def_regular)
9658 {
9659 /* Mark the symbol as undefined, rather than as defined in
9660 the .plt section. */
9661 sym->st_shndx = SHN_UNDEF;
9662 /* If the symbol is weak we need to clear the value.
9663 Otherwise, the PLT entry would provide a definition for
9664 the symbol even if the symbol wasn't defined anywhere,
9665 and so the symbol would never be NULL. Leave the value if
9666 there were any relocations where pointer equality matters
9667 (this is a clue for the dynamic linker, to make function
9668 pointer comparisons work between an application and shared
9669 library). */
9670 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9671 sym->st_value = 0;
9672 }
9673 }
9674
9675 if (h->got.offset != (bfd_vma) - 1
9676 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9677 /* Undefined weak symbol in static PIE resolves to 0 without
9678 any dynamic relocations. */
9679 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9680 {
9681 Elf_Internal_Rela rela;
9682 bfd_byte *loc;
9683
9684 /* This symbol has an entry in the global offset table. Set it
9685 up. */
9686 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9687 abort ();
9688
9689 rela.r_offset = (htab->root.sgot->output_section->vma
9690 + htab->root.sgot->output_offset
9691 + (h->got.offset & ~(bfd_vma) 1));
9692
9693 if (h->def_regular
9694 && h->type == STT_GNU_IFUNC)
9695 {
9696 if (bfd_link_pic (info))
9697 {
9698 /* Generate R_AARCH64_GLOB_DAT. */
9699 goto do_glob_dat;
9700 }
9701 else
9702 {
9703 asection *plt;
9704
9705 if (!h->pointer_equality_needed)
9706 abort ();
9707
9708 /* For a non-shared object, we can't use .got.plt, which
9709 contains the real function address if we need pointer
9710 equality. We load the GOT entry with the PLT entry. */
9711 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9712 bfd_put_NN (output_bfd, (plt->output_section->vma
9713 + plt->output_offset
9714 + h->plt.offset),
9715 htab->root.sgot->contents
9716 + (h->got.offset & ~(bfd_vma) 1));
9717 return true;
9718 }
9719 }
9720 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9721 {
9722 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9723 return false;
9724
9725 BFD_ASSERT ((h->got.offset & 1) != 0);
9726 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9727 rela.r_addend = (h->root.u.def.value
9728 + h->root.u.def.section->output_section->vma
9729 + h->root.u.def.section->output_offset);
9730 }
9731 else
9732 {
9733 do_glob_dat:
9734 BFD_ASSERT ((h->got.offset & 1) == 0);
9735 bfd_put_NN (output_bfd, (bfd_vma) 0,
9736 htab->root.sgot->contents + h->got.offset);
9737 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9738 rela.r_addend = 0;
9739 }
9740
9741 loc = htab->root.srelgot->contents;
9742 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9743 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9744 }
9745
9746 if (h->needs_copy)
9747 {
9748 Elf_Internal_Rela rela;
9749 asection *s;
9750 bfd_byte *loc;
9751
9752 /* This symbol needs a copy reloc. Set it up. */
9753 if (h->dynindx == -1
9754 || (h->root.type != bfd_link_hash_defined
9755 && h->root.type != bfd_link_hash_defweak)
9756 || htab->root.srelbss == NULL)
9757 abort ();
9758
9759 rela.r_offset = (h->root.u.def.value
9760 + h->root.u.def.section->output_section->vma
9761 + h->root.u.def.section->output_offset);
9762 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9763 rela.r_addend = 0;
9764 if (h->root.u.def.section == htab->root.sdynrelro)
9765 s = htab->root.sreldynrelro;
9766 else
9767 s = htab->root.srelbss;
9768 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9769 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9770 }
9771
9772 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9773 be NULL for local symbols. */
9774 if (sym != NULL
9775 && (h == elf_hash_table (info)->hdynamic
9776 || h == elf_hash_table (info)->hgot))
9777 sym->st_shndx = SHN_ABS;
9778
9779 return true;
9780 }
9781
9782 /* Finish up local dynamic symbol handling. We set the contents of
9783 various dynamic sections here. */
9784
9785 static int
9786 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9787 {
9788 struct elf_link_hash_entry *h
9789 = (struct elf_link_hash_entry *) *slot;
9790 struct bfd_link_info *info
9791 = (struct bfd_link_info *) inf;
9792
9793 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9794 info, h, NULL);
9795 }
9796
9797 static void
9798 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9799 struct elf_aarch64_link_hash_table
9800 *htab)
9801 {
9802 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
9803 small and large PLTs and at the moment just generates
9804 the small PLT. */
9805
9806 /* PLT0 of the small PLT looks like this in ELF64 -
9807 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9808 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9809 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9810 // symbol resolver
9811 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9812 // GOTPLT entry for this.
9813 br x17
9814 PLT0 will be slightly different in ELF32 due to different got entry
9815 size. */
9816 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9817 bfd_vma plt_base;
9818
9819
9820 memcpy (htab->root.splt->contents, htab->plt0_entry,
9821 htab->plt_header_size);
9822
9823 /* PR 26312: Explicitly set the sh_entsize to 0 so that
9824 consumers do not think that the section contains fixed
9825 sized objects. */
9826 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
9827
9828 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9829 + htab->root.sgotplt->output_offset
9830 + GOT_ENTRY_SIZE * 2);
9831
9832 plt_base = htab->root.splt->output_section->vma +
9833 htab->root.splt->output_offset;
9834
9835 /* First instruction in BTI enabled PLT stub is a BTI
9836 instruction so skip it. */
9837 bfd_byte *plt0_entry = htab->root.splt->contents;
9838 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9839 plt0_entry = plt0_entry + 4;
9840
9841 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9842 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9843 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9844 plt0_entry + 4,
9845 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9846
9847 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9848 plt0_entry + 8,
9849 PG_OFFSET (plt_got_2nd_ent));
9850
9851 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9852 plt0_entry + 12,
9853 PG_OFFSET (plt_got_2nd_ent));
9854 }
9855
9856 static bool
9857 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9858 struct bfd_link_info *info)
9859 {
9860 struct elf_aarch64_link_hash_table *htab;
9861 bfd *dynobj;
9862 asection *sdyn;
9863
9864 htab = elf_aarch64_hash_table (info);
9865 dynobj = htab->root.dynobj;
9866 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9867
9868 if (htab->root.dynamic_sections_created)
9869 {
9870 ElfNN_External_Dyn *dyncon, *dynconend;
9871
9872 if (sdyn == NULL || htab->root.sgot == NULL)
9873 abort ();
9874
9875 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9876 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9877 for (; dyncon < dynconend; dyncon++)
9878 {
9879 Elf_Internal_Dyn dyn;
9880 asection *s;
9881
9882 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9883
9884 switch (dyn.d_tag)
9885 {
9886 default:
9887 continue;
9888
9889 case DT_PLTGOT:
9890 s = htab->root.sgotplt;
9891 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9892 break;
9893
9894 case DT_JMPREL:
9895 s = htab->root.srelplt;
9896 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9897 break;
9898
9899 case DT_PLTRELSZ:
9900 s = htab->root.srelplt;
9901 dyn.d_un.d_val = s->size;
9902 break;
9903
9904 case DT_TLSDESC_PLT:
9905 s = htab->root.splt;
9906 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9907 + htab->root.tlsdesc_plt;
9908 break;
9909
9910 case DT_TLSDESC_GOT:
9911 s = htab->root.sgot;
9912 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9913 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9914 + htab->root.tlsdesc_got;
9915 break;
9916 }
9917
9918 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9919 }
9920
9921 }
9922
9923 /* Fill in the special first entry in the procedure linkage table. */
9924 if (htab->root.splt && htab->root.splt->size > 0)
9925 {
9926 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9927
9928 if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9929 {
9930 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9931 bfd_put_NN (output_bfd, (bfd_vma) 0,
9932 htab->root.sgot->contents + htab->root.tlsdesc_got);
9933
9934 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9935 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9936
9937 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9938 if (type == PLT_BTI || type == PLT_BTI_PAC)
9939 {
9940 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9941 }
9942
9943 memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
9944 entry, htab->tlsdesc_plt_entry_size);
9945
9946 {
9947 bfd_vma adrp1_addr =
9948 htab->root.splt->output_section->vma
9949 + htab->root.splt->output_offset
9950 + htab->root.tlsdesc_plt + 4;
9951
9952 bfd_vma adrp2_addr = adrp1_addr + 4;
9953
9954 bfd_vma got_addr =
9955 htab->root.sgot->output_section->vma
9956 + htab->root.sgot->output_offset;
9957
9958 bfd_vma pltgot_addr =
9959 htab->root.sgotplt->output_section->vma
9960 + htab->root.sgotplt->output_offset;
9961
9962 bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
9963
9964 bfd_byte *plt_entry =
9965 htab->root.splt->contents + htab->root.tlsdesc_plt;
9966
9967 /* First instruction in BTI enabled PLT stub is a BTI
9968 instruction so skip it. */
9969 if (type & PLT_BTI)
9970 {
9971 plt_entry = plt_entry + 4;
9972 adrp1_addr = adrp1_addr + 4;
9973 adrp2_addr = adrp2_addr + 4;
9974 }
9975
9976 /* adrp x2, DT_TLSDESC_GOT */
9977 elf_aarch64_update_plt_entry (output_bfd,
9978 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9979 plt_entry + 4,
9980 (PG (dt_tlsdesc_got)
9981 - PG (adrp1_addr)));
9982
9983 /* adrp x3, 0 */
9984 elf_aarch64_update_plt_entry (output_bfd,
9985 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9986 plt_entry + 8,
9987 (PG (pltgot_addr)
9988 - PG (adrp2_addr)));
9989
9990 /* ldr x2, [x2, #0] */
9991 elf_aarch64_update_plt_entry (output_bfd,
9992 BFD_RELOC_AARCH64_LDSTNN_LO12,
9993 plt_entry + 12,
9994 PG_OFFSET (dt_tlsdesc_got));
9995
9996 /* add x3, x3, 0 */
9997 elf_aarch64_update_plt_entry (output_bfd,
9998 BFD_RELOC_AARCH64_ADD_LO12,
9999 plt_entry + 16,
10000 PG_OFFSET (pltgot_addr));
10001 }
10002 }
10003 }
10004
10005 if (htab->root.sgotplt)
10006 {
10007 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
10008 {
10009 _bfd_error_handler
10010 (_("discarded output section: `%pA'"), htab->root.sgotplt);
10011 return false;
10012 }
10013
10014 /* Fill in the first three entries in the global offset table. */
10015 if (htab->root.sgotplt->size > 0)
10016 {
10017 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
10018
10019 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
10020 bfd_put_NN (output_bfd,
10021 (bfd_vma) 0,
10022 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
10023 bfd_put_NN (output_bfd,
10024 (bfd_vma) 0,
10025 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
10026 }
10027
10028 if (htab->root.sgot)
10029 {
10030 if (htab->root.sgot->size > 0)
10031 {
10032 bfd_vma addr =
10033 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
10034 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
10035 }
10036 }
10037
10038 elf_section_data (htab->root.sgotplt->output_section)->
10039 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
10040 }
10041
10042 if (htab->root.sgot && htab->root.sgot->size > 0)
10043 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
10044 = GOT_ENTRY_SIZE;
10045
10046 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
10047 htab_traverse (htab->loc_hash_table,
10048 elfNN_aarch64_finish_local_dynamic_symbol,
10049 info);
10050
10051 return true;
10052 }
10053
10054 /* Check whether BTI and/or PAC enabled PLTs are used. Returns the type found. */
10055 static aarch64_plt_type
10056 get_plt_type (bfd *abfd)
10057 {
10058 aarch64_plt_type ret = PLT_NORMAL;
10059 bfd_byte *contents, *extdyn, *extdynend;
10060 asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
10061 if (!sec
10062 || (sec->flags & SEC_HAS_CONTENTS) == 0
10063 || sec->size < sizeof (ElfNN_External_Dyn)
10064 || !bfd_malloc_and_get_section (abfd, sec, &contents))
10065 return ret;
10066 extdyn = contents;
10067 extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
10068 for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
10069 {
10070 Elf_Internal_Dyn dyn;
10071 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
10072
10073 /* Let's check the processor specific dynamic array tags. */
10074 bfd_vma tag = dyn.d_tag;
10075 if (tag < DT_LOPROC || tag > DT_HIPROC)
10076 continue;
10077
10078 switch (tag)
10079 {
10080 case DT_AARCH64_BTI_PLT:
10081 ret |= PLT_BTI;
10082 break;
10083
10084 case DT_AARCH64_PAC_PLT:
10085 ret |= PLT_PAC;
10086 break;
10087
10088 default: break;
10089 }
10090 }
10091 free (contents);
10092 return ret;
10093 }
10094
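/* Editorial summary of the wrapper below: it is a thin layer over
   _bfd_elf_get_synthetic_symtab that records the PLT flavour of ABFD
   first, so that elfNN_aarch64_plt_sym_val further down can pick the
   matching stub size when placing the synthesized PLT symbols.  */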
10095 static long
10096 elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
10097 long symcount,
10098 asymbol **syms,
10099 long dynsymcount,
10100 asymbol **dynsyms,
10101 asymbol **ret)
10102 {
10103 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
10104 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
10105 dynsymcount, dynsyms, ret);
10106 }
10107
10108 /* Return the address of the Ith PLT stub in section PLT, for relocation REL
10109 or (bfd_vma) -1 if it should not be included. */
10110
10111 static bfd_vma
10112 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
10113 const arelent *rel ATTRIBUTE_UNUSED)
10114 {
10115 size_t plt0_size = PLT_ENTRY_SIZE;
10116 size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
10117
10118 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
10119 {
10120 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10121 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
10122 else
10123 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10124 }
10125 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
10126 {
10127 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10128 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
10129 }
10130 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
10131 {
10132 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10133 }
10134
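/* For instance (editorial note, assuming the default sizes used here,
   a 32-byte PLT0 and 16-byte small PLTn entries), the stub for index I
   starts at plt->vma + 32 + I * 16.  */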
10135 return plt->vma + plt0_size + i * pltn_size;
10136 }
10137
10138 /* Returns TRUE if NAME is an AArch64 mapping symbol.
10139 The ARM ELF standard defines $x (for A64 code) and $d (for data).
10140 It also allows a period-initiated suffix to be added to the symbol, i.e.:
10141 "$[adtx]\.[:sym_char]+". */
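/* For instance "$x", "$d" and "$d.42" are accepted as mapping symbols by
   the test below, while "$xfoo" (no separating period) is not.  */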
10142
10143 static bool
10144 is_aarch64_mapping_symbol (const char * name)
10145 {
10146 return name != NULL /* Paranoia. */
10147 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
10148 the mapping symbols could have acquired a prefix.
10149 We do not support this here, since such symbols no
10150 longer conform to the ARM ELF ABI. */
10151 && (name[1] == 'd' || name[1] == 'x')
10152 && (name[2] == 0 || name[2] == '.');
10153 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
10154 any characters that follow the period are legal characters for the body
10155 of a symbol's name. For now we just assume that this is the case. */
10156 }
10157
10158 /* Make sure that mapping symbols in object files are not removed via the
10159 "strip --strip-unneeded" tool. These symbols might be needed in order to
10160 correctly generate linked files. Once an object file has been linked,
10161 it should be safe to remove them. */
10162
10163 static void
10164 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
10165 {
10166 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
10167 && sym->section != bfd_abs_section_ptr
10168 && is_aarch64_mapping_symbol (sym->name))
10169 sym->flags |= BSF_KEEP;
10170 }
10171
10172 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
10173 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
10174 for the effect of GNU properties of the output_bfd. */
10175 static bfd *
10176 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
10177 {
10178 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10179 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
10180 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
10181 elf_aarch64_tdata (info->output_bfd)->plt_type
10182 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
10183 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
10184 return pbfd;
10185 }
10186
10187 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
10188 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
10189 for the effect of GNU properties of the output_bfd. */
10190 static bool
10191 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
10192 bfd *abfd, bfd *bbfd,
10193 elf_property *aprop,
10194 elf_property *bprop)
10195 {
10196 uint32_t prop
10197 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10198
10199 /* If the output has been marked with BTI using a command line argument,
10200 give a warning if necessary. */
10201 /* Properties are merged per type, hence only check for warnings when merging
10202 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
10203 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
10204 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
10205 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
10206 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
10207 {
10208 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10209 || !aprop)
10210 {
10211 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10212 "all inputs do not have BTI in NOTE section."),
10213 abfd);
10214 }
10215 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10216 || !bprop)
10217 {
10218 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10219 "all inputs do not have BTI in NOTE section."),
10220 bbfd);
10221 }
10222 }
10223
10224 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
10225 bprop, prop);
10226 }
10227
10228 /* We use this so we can override certain functions
10229 (though currently we don't). */
10230
10231 const struct elf_size_info elfNN_aarch64_size_info =
10232 {
10233 sizeof (ElfNN_External_Ehdr),
10234 sizeof (ElfNN_External_Phdr),
10235 sizeof (ElfNN_External_Shdr),
10236 sizeof (ElfNN_External_Rel),
10237 sizeof (ElfNN_External_Rela),
10238 sizeof (ElfNN_External_Sym),
10239 sizeof (ElfNN_External_Dyn),
10240 sizeof (Elf_External_Note),
10241 4, /* Hash table entry size. */
10242 1, /* Internal relocs per external relocs. */
10243 ARCH_SIZE, /* Arch size. */
10244 LOG_FILE_ALIGN, /* Log_file_align. */
10245 ELFCLASSNN, EV_CURRENT,
10246 bfd_elfNN_write_out_phdrs,
10247 bfd_elfNN_write_shdrs_and_ehdr,
10248 bfd_elfNN_checksum_contents,
10249 bfd_elfNN_write_relocs,
10250 bfd_elfNN_swap_symbol_in,
10251 bfd_elfNN_swap_symbol_out,
10252 bfd_elfNN_slurp_reloc_table,
10253 bfd_elfNN_slurp_symbol_table,
10254 bfd_elfNN_swap_dyn_in,
10255 bfd_elfNN_swap_dyn_out,
10256 bfd_elfNN_swap_reloc_in,
10257 bfd_elfNN_swap_reloc_out,
10258 bfd_elfNN_swap_reloca_in,
10259 bfd_elfNN_swap_reloca_out
10260 };
10261
10262 #define ELF_ARCH bfd_arch_aarch64
10263 #define ELF_MACHINE_CODE EM_AARCH64
10264 #define ELF_MAXPAGESIZE 0x10000
10265 #define ELF_COMMONPAGESIZE 0x1000
10266
10267 #define bfd_elfNN_close_and_cleanup \
10268 elfNN_aarch64_close_and_cleanup
10269
10270 #define bfd_elfNN_bfd_free_cached_info \
10271 elfNN_aarch64_bfd_free_cached_info
10272
10273 #define bfd_elfNN_bfd_is_target_special_symbol \
10274 elfNN_aarch64_is_target_special_symbol
10275
10276 #define bfd_elfNN_bfd_link_hash_table_create \
10277 elfNN_aarch64_link_hash_table_create
10278
10279 #define bfd_elfNN_bfd_merge_private_bfd_data \
10280 elfNN_aarch64_merge_private_bfd_data
10281
10282 #define bfd_elfNN_bfd_print_private_bfd_data \
10283 elfNN_aarch64_print_private_bfd_data
10284
10285 #define bfd_elfNN_bfd_reloc_type_lookup \
10286 elfNN_aarch64_reloc_type_lookup
10287
10288 #define bfd_elfNN_bfd_reloc_name_lookup \
10289 elfNN_aarch64_reloc_name_lookup
10290
10291 #define bfd_elfNN_bfd_set_private_flags \
10292 elfNN_aarch64_set_private_flags
10293
10294 #define bfd_elfNN_find_inliner_info \
10295 elfNN_aarch64_find_inliner_info
10296
10297 #define bfd_elfNN_get_synthetic_symtab \
10298 elfNN_aarch64_get_synthetic_symtab
10299
10300 #define bfd_elfNN_mkobject \
10301 elfNN_aarch64_mkobject
10302
10303 #define bfd_elfNN_new_section_hook \
10304 elfNN_aarch64_new_section_hook
10305
10306 #define elf_backend_adjust_dynamic_symbol \
10307 elfNN_aarch64_adjust_dynamic_symbol
10308
10309 #define elf_backend_always_size_sections \
10310 elfNN_aarch64_always_size_sections
10311
10312 #define elf_backend_check_relocs \
10313 elfNN_aarch64_check_relocs
10314
10315 #define elf_backend_copy_indirect_symbol \
10316 elfNN_aarch64_copy_indirect_symbol
10317
10318 #define elf_backend_merge_symbol_attribute \
10319 elfNN_aarch64_merge_symbol_attribute
10320
10321 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
10322 to them in our hash. */
10323 #define elf_backend_create_dynamic_sections \
10324 elfNN_aarch64_create_dynamic_sections
10325
10326 #define elf_backend_init_index_section \
10327 _bfd_elf_init_2_index_sections
10328
10329 #define elf_backend_finish_dynamic_sections \
10330 elfNN_aarch64_finish_dynamic_sections
10331
10332 #define elf_backend_finish_dynamic_symbol \
10333 elfNN_aarch64_finish_dynamic_symbol
10334
10335 #define elf_backend_object_p \
10336 elfNN_aarch64_object_p
10337
10338 #define elf_backend_output_arch_local_syms \
10339 elfNN_aarch64_output_arch_local_syms
10340
10341 #define elf_backend_maybe_function_sym \
10342 elfNN_aarch64_maybe_function_sym
10343
10344 #define elf_backend_plt_sym_val \
10345 elfNN_aarch64_plt_sym_val
10346
10347 #define elf_backend_init_file_header \
10348 elfNN_aarch64_init_file_header
10349
10350 #define elf_backend_relocate_section \
10351 elfNN_aarch64_relocate_section
10352
10353 #define elf_backend_reloc_type_class \
10354 elfNN_aarch64_reloc_type_class
10355
10356 #define elf_backend_section_from_shdr \
10357 elfNN_aarch64_section_from_shdr
10358
10359 #define elf_backend_section_from_phdr \
10360 elfNN_aarch64_section_from_phdr
10361
10362 #define elf_backend_modify_headers \
10363 elfNN_aarch64_modify_headers
10364
10365 #define elf_backend_size_dynamic_sections \
10366 elfNN_aarch64_size_dynamic_sections
10367
10368 #define elf_backend_size_info \
10369 elfNN_aarch64_size_info
10370
10371 #define elf_backend_write_section \
10372 elfNN_aarch64_write_section
10373
10374 #define elf_backend_symbol_processing \
10375 elfNN_aarch64_backend_symbol_processing
10376
10377 #define elf_backend_setup_gnu_properties \
10378 elfNN_aarch64_link_setup_gnu_properties
10379
10380 #define elf_backend_merge_gnu_properties \
10381 elfNN_aarch64_merge_gnu_properties
10382
10383 #define elf_backend_can_refcount 1
10384 #define elf_backend_can_gc_sections 1
10385 #define elf_backend_plt_readonly 1
10386 #define elf_backend_want_got_plt 1
10387 #define elf_backend_want_plt_sym 0
10388 #define elf_backend_want_dynrelro 1
10389 #define elf_backend_may_use_rel_p 0
10390 #define elf_backend_may_use_rela_p 1
10391 #define elf_backend_default_use_rela_p 1
10392 #define elf_backend_rela_normal 1
10393 #define elf_backend_dtrel_excludes_plt 1
10394 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
10395 #define elf_backend_default_execstack 0
10396 #define elf_backend_extern_protected_data 0
10397 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
10398
10399 #undef elf_backend_obj_attrs_section
10400 #define elf_backend_obj_attrs_section ".ARM.attributes"
10401
10402 #include "elfNN-target.h"
10403
10404 /* CloudABI support. */
10405
10406 #undef TARGET_LITTLE_SYM
10407 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
10408 #undef TARGET_LITTLE_NAME
10409 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
10410 #undef TARGET_BIG_SYM
10411 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
10412 #undef TARGET_BIG_NAME
10413 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
10414
10415 #undef ELF_OSABI
10416 #define ELF_OSABI ELFOSABI_CLOUDABI
10417
10418 #undef elfNN_bed
10419 #define elfNN_bed elfNN_aarch64_cloudabi_bed
10420
10421 #include "elfNN-target.h"