1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
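
   As an illustration only (the authoritative layout belongs to the C
   library, not to BFD), the pair of GOT slots can be pictured as a
   tls_index structure along these lines, with field names assumed from
   the usual GNU C library definition:

     typedef struct
     {
       unsigned long ti_module;    (fixed up via R_AARCH64_TLS_DTPMOD)
       unsigned long ti_offset;    (fixed up via R_AARCH64_TLS_DTPREL)
     } tls_index;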
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
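
   Again purely as a sketch (the real definition lives in the dynamic
   linker), the two GOT slots form a descriptor that behaves roughly
   like the structure below; the loader installs the resolver and its
   argument, and the "blr x1" in the fragment above calls that resolver
   with x0 pointing at the descriptor:

     struct tlsdesc
     {
       ptrdiff_t (*entry) (struct tlsdesc *);    (set by the loader)
       void *arg;                                (private to the resolver)
     };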
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
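
   In outline (the helper and field names below are invented for the
   illustration and are not necessarily the ones used in this file):

     got_type = is_tlsdesc_reloc (r_type) ? GOT_TLSDESC_GD : GOT_TLS_GD;
     got_refcount_for (h_or_local_sym) += 1;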
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, looking in the local symbol data
118 structures constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
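
   Purely as an illustration of the bookkeeping (not the exact code in
   this file), allocating such a double slot amounts to something like
   the following, where the section and field names are assumptions
   based on the generic ELF hash table layout:

     locals[r_symndx].got_offset = htab->root.sgot->size;
     htab->root.sgot->size += 2 * GOT_ENTRY_SIZE;
     htab->root.srelgot->size += RELOC_SIZE (htab);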
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
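
   For example (the helper name is invented for the illustration), a
   consumer of the recorded offset would do:

     bfd_vma off = recorded_got_offset (sym);
     bool relocs_already_emitted = (off & 1) != 0;
     off &= ~(bfd_vma) 1;    (clear the flag bit before using the offset)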
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "elf-bfd.h"
143 #include "bfdlink.h"
144 #include "objalloc.h"
145 #include "elf/aarch64.h"
146 #include "elfxx-aarch64.h"
147 #include "cpu-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
158 #endif
159
160 #if ARCH_SIZE == 32
161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
163 #define HOWTO64(...) EMPTY_HOWTO (0)
164 #define HOWTO32(...) HOWTO (__VA_ARGS__)
165 #define LOG_FILE_ALIGN 2
166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC
168 #endif
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \
209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
221
222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
245
246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
255 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
259
260 #define ELIMINATE_COPY_RELOCS 1
261
262 /* Return size of a relocation entry. HTAB is the bfd's
263 elf_aarch64_link_hash_table. */
264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
265
266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
268 #define PLT_ENTRY_SIZE (32)
269 #define PLT_SMALL_ENTRY_SIZE (16)
270 #define PLT_TLSDESC_ENTRY_SIZE (32)
271 /* PLT sizes with BTI insn. */
272 #define PLT_BTI_SMALL_ENTRY_SIZE (24)
273 /* PLT sizes with PAC insn. */
274 #define PLT_PAC_SMALL_ENTRY_SIZE (24)
275 /* PLT sizes with BTI and PAC insn. */
276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24)
277
278 /* Encoding of the nop instruction. */
279 #define INSN_NOP 0xd503201f
280
281 #define aarch64_compute_jump_table_size(htab) \
282 (((htab)->root.srelplt == NULL) ? 0 \
283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
284
285 /* The first entry in a procedure linkage table looks like this.
286 These PLT entries are used if the distance between the PLTGOT
287 and the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
288 in x16 and needs to work out PLTGOT[1] by using an address of
289 [x16,#-GOT_ENTRY_SIZE]. */
290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
291 {
292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
294 #if ARCH_SIZE == 64
295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
297 #else
298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
300 #endif
301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
302 0x1f, 0x20, 0x03, 0xd5, /* nop */
303 0x1f, 0x20, 0x03, 0xd5, /* nop */
304 0x1f, 0x20, 0x03, 0xd5, /* nop */
305 };
306
307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
308 {
309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
312 #if ARCH_SIZE == 64
313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
315 #else
316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
318 #endif
319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 /* A per-function entry in a procedure linkage table looks like this.
325 These PLT entries are used if the distance between the PLTGOT and
326 the PLT is < 4GB. Use BTI versions of the PLTs when enabled. */
327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
328 {
329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
330 #if ARCH_SIZE == 64
331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
333 #else
334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
336 #endif
337 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
338 };
339
340 static const bfd_byte
341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
342 {
343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
345 #if ARCH_SIZE == 64
346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
348 #else
349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
351 #endif
352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
353 0x1f, 0x20, 0x03, 0xd5, /* nop */
354 };
355
356 static const bfd_byte
357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
358 {
359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
360 #if ARCH_SIZE == 64
361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
363 #else
364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
366 #endif
367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
369 0x1f, 0x20, 0x03, 0xd5, /* nop */
370 };
371
372 static const bfd_byte
373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
374 {
375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
377 #if ARCH_SIZE == 64
378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
380 #else
381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
383 #endif
384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
386 };
387
388 static const bfd_byte
389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
390 {
391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
394 #if ARCH_SIZE == 64
395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
397 #else
398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
400 #endif
401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
402 0x1f, 0x20, 0x03, 0xd5, /* nop */
403 0x1f, 0x20, 0x03, 0xd5, /* nop */
404 };
405
406 static const bfd_byte
407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
408 {
409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
413 #if ARCH_SIZE == 64
414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
416 #else
417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
419 #endif
420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
421 0x1f, 0x20, 0x03, 0xd5, /* nop */
422 };
423
424 #define elf_info_to_howto elfNN_aarch64_info_to_howto
425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
426
427 #define AARCH64_ELF_ABI_VERSION 0
428
429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
430 #define ALL_ONES (~ (bfd_vma) 0)
431
432 /* Indexed by the bfd internal reloc enumerators.
433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
434 in reloc.c. */
435
436 static reloc_howto_type elfNN_aarch64_howto_table[] =
437 {
438 EMPTY_HOWTO (0),
439
440 /* Basic data relocations. */
441
442 /* Deprecated, but retained for backwards compatibility. */
443 HOWTO64 (R_AARCH64_NULL, /* type */
444 0, /* rightshift */
445 0, /* size */
446 0, /* bitsize */
447 false, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_dont, /* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_AARCH64_NULL", /* name */
452 false, /* partial_inplace */
453 0, /* src_mask */
454 0, /* dst_mask */
455 false), /* pcrel_offset */
456 HOWTO (R_AARCH64_NONE, /* type */
457 0, /* rightshift */
458 0, /* size */
459 0, /* bitsize */
460 false, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_dont, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_NONE", /* name */
465 false, /* partial_inplace */
466 0, /* src_mask */
467 0, /* dst_mask */
468 false), /* pcrel_offset */
469
470 /* .xword: (S+A) */
471 HOWTO64 (AARCH64_R (ABS64), /* type */
472 0, /* rightshift */
473 8, /* size */
474 64, /* bitsize */
475 false, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 AARCH64_R_STR (ABS64), /* name */
480 false, /* partial_inplace */
481 0, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 false), /* pcrel_offset */
484
485 /* .word: (S+A) */
486 HOWTO (AARCH64_R (ABS32), /* type */
487 0, /* rightshift */
488 4, /* size */
489 32, /* bitsize */
490 false, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_unsigned, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 AARCH64_R_STR (ABS32), /* name */
495 false, /* partial_inplace */
496 0, /* src_mask */
497 0xffffffff, /* dst_mask */
498 false), /* pcrel_offset */
499
500 /* .half: (S+A) */
501 HOWTO (AARCH64_R (ABS16), /* type */
502 0, /* rightshift */
503 2, /* size */
504 16, /* bitsize */
505 false, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_unsigned, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 AARCH64_R_STR (ABS16), /* name */
510 false, /* partial_inplace */
511 0, /* src_mask */
512 0xffff, /* dst_mask */
513 false), /* pcrel_offset */
514
515 /* .xword: (S+A-P) */
516 HOWTO64 (AARCH64_R (PREL64), /* type */
517 0, /* rightshift */
518 8, /* size */
519 64, /* bitsize */
520 true, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 AARCH64_R_STR (PREL64), /* name */
525 false, /* partial_inplace */
526 0, /* src_mask */
527 ALL_ONES, /* dst_mask */
528 true), /* pcrel_offset */
529
530 /* .word: (S+A-P) */
531 HOWTO (AARCH64_R (PREL32), /* type */
532 0, /* rightshift */
533 4, /* size */
534 32, /* bitsize */
535 true, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (PREL32), /* name */
540 false, /* partial_inplace */
541 0, /* src_mask */
542 0xffffffff, /* dst_mask */
543 true), /* pcrel_offset */
544
545 /* .half: (S+A-P) */
546 HOWTO (AARCH64_R (PREL16), /* type */
547 0, /* rightshift */
548 2, /* size */
549 16, /* bitsize */
550 true, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (PREL16), /* name */
555 false, /* partial_inplace */
556 0, /* src_mask */
557 0xffff, /* dst_mask */
558 true), /* pcrel_offset */
559
560 /* Group relocations to create a 16, 32, 48 or 64 bit
561 unsigned data or abs address inline. */
562
563 /* MOVZ: ((S+A) >> 0) & 0xffff */
564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
565 0, /* rightshift */
566 4, /* size */
567 16, /* bitsize */
568 false, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_UABS_G0), /* name */
573 false, /* partial_inplace */
574 0, /* src_mask */
575 0xffff, /* dst_mask */
576 false), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
580 0, /* rightshift */
581 4, /* size */
582 16, /* bitsize */
583 false, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
588 false, /* partial_inplace */
589 0, /* src_mask */
590 0xffff, /* dst_mask */
591 false), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 16) & 0xffff */
594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
595 16, /* rightshift */
596 4, /* size */
597 16, /* bitsize */
598 false, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (MOVW_UABS_G1), /* name */
603 false, /* partial_inplace */
604 0, /* src_mask */
605 0xffff, /* dst_mask */
606 false), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
610 16, /* rightshift */
611 4, /* size */
612 16, /* bitsize */
613 false, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
618 false, /* partial_inplace */
619 0, /* src_mask */
620 0xffff, /* dst_mask */
621 false), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 32) & 0xffff */
624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
625 32, /* rightshift */
626 4, /* size */
627 16, /* bitsize */
628 false, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (MOVW_UABS_G2), /* name */
633 false, /* partial_inplace */
634 0, /* src_mask */
635 0xffff, /* dst_mask */
636 false), /* pcrel_offset */
637
638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
640 32, /* rightshift */
641 4, /* size */
642 16, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
648 false, /* partial_inplace */
649 0, /* src_mask */
650 0xffff, /* dst_mask */
651 false), /* pcrel_offset */
652
653 /* MOVZ: ((S+A) >> 48) & 0xffff */
654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
655 48, /* rightshift */
656 4, /* size */
657 16, /* bitsize */
658 false, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_unsigned, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (MOVW_UABS_G3), /* name */
663 false, /* partial_inplace */
664 0, /* src_mask */
665 0xffff, /* dst_mask */
666 false), /* pcrel_offset */
667
668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
669 signed data or abs address inline. Will change instruction
670 to MOVN or MOVZ depending on sign of calculated value. */
671
672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
674 0, /* rightshift */
675 4, /* size */
676 17, /* bitsize */
677 false, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (MOVW_SABS_G0), /* name */
682 false, /* partial_inplace */
683 0, /* src_mask */
684 0xffff, /* dst_mask */
685 false), /* pcrel_offset */
686
687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
689 16, /* rightshift */
690 4, /* size */
691 17, /* bitsize */
692 false, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (MOVW_SABS_G1), /* name */
697 false, /* partial_inplace */
698 0, /* src_mask */
699 0xffff, /* dst_mask */
700 false), /* pcrel_offset */
701
702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
704 32, /* rightshift */
705 4, /* size */
706 17, /* bitsize */
707 false, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (MOVW_SABS_G2), /* name */
712 false, /* partial_inplace */
713 0, /* src_mask */
714 0xffff, /* dst_mask */
715 false), /* pcrel_offset */
716
717 /* Group relocations to create a 16, 32, 48 or 64 bit
718 PC relative address inline. */
719
720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */
722 0, /* rightshift */
723 4, /* size */
724 17, /* bitsize */
725 true, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (MOVW_PREL_G0), /* name */
730 false, /* partial_inplace */
731 0, /* src_mask */
732 0xffff, /* dst_mask */
733 true), /* pcrel_offset */
734
735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */
737 0, /* rightshift */
738 4, /* size */
739 16, /* bitsize */
740 true, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */
745 false, /* partial_inplace */
746 0, /* src_mask */
747 0xffff, /* dst_mask */
748 true), /* pcrel_offset */
749
750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */
752 16, /* rightshift */
753 4, /* size */
754 17, /* bitsize */
755 true, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_signed, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (MOVW_PREL_G1), /* name */
760 false, /* partial_inplace */
761 0, /* src_mask */
762 0xffff, /* dst_mask */
763 true), /* pcrel_offset */
764
765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */
767 16, /* rightshift */
768 4, /* size */
769 16, /* bitsize */
770 true, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */
775 false, /* partial_inplace */
776 0, /* src_mask */
777 0xffff, /* dst_mask */
778 true), /* pcrel_offset */
779
780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */
782 32, /* rightshift */
783 4, /* size */
784 17, /* bitsize */
785 true, /* pc_relative */
786 0, /* bitpos */
787 complain_overflow_signed, /* complain_on_overflow */
788 bfd_elf_generic_reloc, /* special_function */
789 AARCH64_R_STR (MOVW_PREL_G2), /* name */
790 false, /* partial_inplace */
791 0, /* src_mask */
792 0xffff, /* dst_mask */
793 true), /* pcrel_offset */
794
795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */
797 32, /* rightshift */
798 4, /* size */
799 16, /* bitsize */
800 true, /* pc_relative */
801 0, /* bitpos */
802 complain_overflow_dont, /* complain_on_overflow */
803 bfd_elf_generic_reloc, /* special_function */
804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */
805 false, /* partial_inplace */
806 0, /* src_mask */
807 0xffff, /* dst_mask */
808 true), /* pcrel_offset */
809
810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */
812 48, /* rightshift */
813 4, /* size */
814 16, /* bitsize */
815 true, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (MOVW_PREL_G3), /* name */
820 false, /* partial_inplace */
821 0, /* src_mask */
822 0xffff, /* dst_mask */
823 true), /* pcrel_offset */
824
825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
826 addresses: PG(x) is (x & ~0xfff). */
827
828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
830 2, /* rightshift */
831 4, /* size */
832 19, /* bitsize */
833 true, /* pc_relative */
834 0, /* bitpos */
835 complain_overflow_signed, /* complain_on_overflow */
836 bfd_elf_generic_reloc, /* special_function */
837 AARCH64_R_STR (LD_PREL_LO19), /* name */
838 false, /* partial_inplace */
839 0, /* src_mask */
840 0x7ffff, /* dst_mask */
841 true), /* pcrel_offset */
842
843 /* ADR: (S+A-P) & 0x1fffff */
844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
845 0, /* rightshift */
846 4, /* size */
847 21, /* bitsize */
848 true, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_signed, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (ADR_PREL_LO21), /* name */
853 false, /* partial_inplace */
854 0, /* src_mask */
855 0x1fffff, /* dst_mask */
856 true), /* pcrel_offset */
857
858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
860 12, /* rightshift */
861 4, /* size */
862 21, /* bitsize */
863 true, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_signed, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
868 false, /* partial_inplace */
869 0, /* src_mask */
870 0x1fffff, /* dst_mask */
871 true), /* pcrel_offset */
872
873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
875 12, /* rightshift */
876 4, /* size */
877 21, /* bitsize */
878 true, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
883 false, /* partial_inplace */
884 0, /* src_mask */
885 0x1fffff, /* dst_mask */
886 true), /* pcrel_offset */
887
888 /* ADD: (S+A) & 0xfff [no overflow check] */
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
890 0, /* rightshift */
891 4, /* size */
892 12, /* bitsize */
893 false, /* pc_relative */
894 10, /* bitpos */
895 complain_overflow_dont, /* complain_on_overflow */
896 bfd_elf_generic_reloc, /* special_function */
897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
898 false, /* partial_inplace */
899 0, /* src_mask */
900 0x3ffc00, /* dst_mask */
901 false), /* pcrel_offset */
902
903 /* LD/ST8: (S+A) & 0xfff */
904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
905 0, /* rightshift */
906 4, /* size */
907 12, /* bitsize */
908 false, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
913 false, /* partial_inplace */
914 0, /* src_mask */
915 0xfff, /* dst_mask */
916 false), /* pcrel_offset */
917
918 /* Relocations for control-flow instructions. */
919
920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
921 HOWTO (AARCH64_R (TSTBR14), /* type */
922 2, /* rightshift */
923 4, /* size */
924 14, /* bitsize */
925 true, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_signed, /* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 AARCH64_R_STR (TSTBR14), /* name */
930 false, /* partial_inplace */
931 0, /* src_mask */
932 0x3fff, /* dst_mask */
933 true), /* pcrel_offset */
934
935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
936 HOWTO (AARCH64_R (CONDBR19), /* type */
937 2, /* rightshift */
938 4, /* size */
939 19, /* bitsize */
940 true, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_signed, /* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 AARCH64_R_STR (CONDBR19), /* name */
945 false, /* partial_inplace */
946 0, /* src_mask */
947 0x7ffff, /* dst_mask */
948 true), /* pcrel_offset */
949
950 /* B: ((S+A-P) >> 2) & 0x3ffffff */
951 HOWTO (AARCH64_R (JUMP26), /* type */
952 2, /* rightshift */
953 4, /* size */
954 26, /* bitsize */
955 true, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_signed, /* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 AARCH64_R_STR (JUMP26), /* name */
960 false, /* partial_inplace */
961 0, /* src_mask */
962 0x3ffffff, /* dst_mask */
963 true), /* pcrel_offset */
964
965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
966 HOWTO (AARCH64_R (CALL26), /* type */
967 2, /* rightshift */
968 4, /* size */
969 26, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_signed, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (CALL26), /* name */
975 false, /* partial_inplace */
976 0, /* src_mask */
977 0x3ffffff, /* dst_mask */
978 true), /* pcrel_offset */
979
980 /* LD/ST16: (S+A) & 0xffe */
981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
982 1, /* rightshift */
983 4, /* size */
984 12, /* bitsize */
985 false, /* pc_relative */
986 0, /* bitpos */
987 complain_overflow_dont, /* complain_on_overflow */
988 bfd_elf_generic_reloc, /* special_function */
989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
990 false, /* partial_inplace */
991 0, /* src_mask */
992 0xffe, /* dst_mask */
993 false), /* pcrel_offset */
994
995 /* LD/ST32: (S+A) & 0xffc */
996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
997 2, /* rightshift */
998 4, /* size */
999 12, /* bitsize */
1000 false, /* pc_relative */
1001 0, /* bitpos */
1002 complain_overflow_dont, /* complain_on_overflow */
1003 bfd_elf_generic_reloc, /* special_function */
1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
1005 false, /* partial_inplace */
1006 0, /* src_mask */
1007 0xffc, /* dst_mask */
1008 false), /* pcrel_offset */
1009
1010 /* LD/ST64: (S+A) & 0xff8 */
1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
1012 3, /* rightshift */
1013 4, /* size */
1014 12, /* bitsize */
1015 false, /* pc_relative */
1016 0, /* bitpos */
1017 complain_overflow_dont, /* complain_on_overflow */
1018 bfd_elf_generic_reloc, /* special_function */
1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
1020 false, /* partial_inplace */
1021 0, /* src_mask */
1022 0xff8, /* dst_mask */
1023 false), /* pcrel_offset */
1024
1025 /* LD/ST128: (S+A) & 0xff0 */
1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
1027 4, /* rightshift */
1028 4, /* size */
1029 12, /* bitsize */
1030 false, /* pc_relative */
1031 0, /* bitpos */
1032 complain_overflow_dont, /* complain_on_overflow */
1033 bfd_elf_generic_reloc, /* special_function */
1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
1035 false, /* partial_inplace */
1036 0, /* src_mask */
1037 0xff0, /* dst_mask */
1038 false), /* pcrel_offset */
1039
1040 /* Set a load-literal immediate field to bits
1041 0x1FFFFC of G(S)-P */
1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
1043 2, /* rightshift */
1044 4, /* size */
1045 19, /* bitsize */
1046 true, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_signed, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */
1051 false, /* partial_inplace */
1052 0, /* src_mask */
1053 0xffffe0, /* dst_mask */
1054 true), /* pcrel_offset */
1055
1056 /* Get to the page for the GOT entry for the symbol
1057 (G(S) - P) using an ADRP instruction. */
1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
1059 12, /* rightshift */
1060 4, /* size */
1061 21, /* bitsize */
1062 true, /* pc_relative */
1063 0, /* bitpos */
1064 complain_overflow_dont, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
1067 false, /* partial_inplace */
1068 0, /* src_mask */
1069 0x1fffff, /* dst_mask */
1070 true), /* pcrel_offset */
1071
1072 /* LD64: GOT offset G(S) & 0xff8 */
1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
1074 3, /* rightshift */
1075 4, /* size */
1076 12, /* bitsize */
1077 false, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont, /* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
1082 false, /* partial_inplace */
1083 0, /* src_mask */
1084 0xff8, /* dst_mask */
1085 false), /* pcrel_offset */
1086
1087 /* LD32: GOT offset G(S) & 0xffc */
1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
1089 2, /* rightshift */
1090 4, /* size */
1091 12, /* bitsize */
1092 false, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
1097 false, /* partial_inplace */
1098 0, /* src_mask */
1099 0xffc, /* dst_mask */
1100 false), /* pcrel_offset */
1101
1102 /* Lower 16 bits of GOT offset for the symbol. */
1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
1104 0, /* rightshift */
1105 4, /* size */
1106 16, /* bitsize */
1107 false, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont, /* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
1112 false, /* partial_inplace */
1113 0, /* src_mask */
1114 0xffff, /* dst_mask */
1115 false), /* pcrel_offset */
1116
1117 /* Higher 16 bits of GOT offset for the symbol. */
1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
1119 16, /* rightshift */
1120 4, /* size */
1121 16, /* bitsize */
1122 false, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
1127 false, /* partial_inplace */
1128 0, /* src_mask */
1129 0xffff, /* dst_mask */
1130 false), /* pcrel_offset */
1131
1132 /* LD64: GOT offset for the symbol. */
1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
1134 3, /* rightshift */
1135 4, /* size */
1136 12, /* bitsize */
1137 false, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
1142 false, /* partial_inplace */
1143 0, /* src_mask */
1144 0x7ff8, /* dst_mask */
1145 false), /* pcrel_offset */
1146
1147 /* LD32: GOT offset to the page address of GOT table.
1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
1150 2, /* rightshift */
1151 4, /* size */
1152 12, /* bitsize */
1153 false, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_unsigned, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
1158 false, /* partial_inplace */
1159 0, /* src_mask */
1160 0x5ffc, /* dst_mask */
1161 false), /* pcrel_offset */
1162
1163 /* LD64: GOT offset to the page address of GOT table.
1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
1166 3, /* rightshift */
1167 4, /* size */
1168 12, /* bitsize */
1169 false, /* pc_relative */
1170 0, /* bitpos */
1171 complain_overflow_unsigned, /* complain_on_overflow */
1172 bfd_elf_generic_reloc, /* special_function */
1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
1174 false, /* partial_inplace */
1175 0, /* src_mask */
1176 0x7ff8, /* dst_mask */
1177 false), /* pcrel_offset */
1178
1179 /* Get to the page for the GOT entry for the symbol
1180 (G(S) - P) using an ADRP instruction. */
1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
1182 12, /* rightshift */
1183 4, /* size */
1184 21, /* bitsize */
1185 true, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
1190 false, /* partial_inplace */
1191 0, /* src_mask */
1192 0x1fffff, /* dst_mask */
1193 true), /* pcrel_offset */
1194
1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
1196 0, /* rightshift */
1197 4, /* size */
1198 21, /* bitsize */
1199 true, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
1204 false, /* partial_inplace */
1205 0, /* src_mask */
1206 0x1fffff, /* dst_mask */
1207 true), /* pcrel_offset */
1208
1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1211 0, /* rightshift */
1212 4, /* size */
1213 12, /* bitsize */
1214 false, /* pc_relative */
1215 0, /* bitpos */
1216 complain_overflow_dont, /* complain_on_overflow */
1217 bfd_elf_generic_reloc, /* special_function */
1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1219 false, /* partial_inplace */
1220 0, /* src_mask */
1221 0xfff, /* dst_mask */
1222 false), /* pcrel_offset */
1223
1224 /* Lower 16 bits of GOT offset to tls_index. */
1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1226 0, /* rightshift */
1227 4, /* size */
1228 16, /* bitsize */
1229 false, /* pc_relative */
1230 0, /* bitpos */
1231 complain_overflow_dont, /* complain_on_overflow */
1232 bfd_elf_generic_reloc, /* special_function */
1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1234 false, /* partial_inplace */
1235 0, /* src_mask */
1236 0xffff, /* dst_mask */
1237 false), /* pcrel_offset */
1238
1239 /* Higher 16 bits of GOT offset to tls_index. */
1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1241 16, /* rightshift */
1242 4, /* size */
1243 16, /* bitsize */
1244 false, /* pc_relative */
1245 0, /* bitpos */
1246 complain_overflow_unsigned, /* complain_on_overflow */
1247 bfd_elf_generic_reloc, /* special_function */
1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1249 false, /* partial_inplace */
1250 0, /* src_mask */
1251 0xffff, /* dst_mask */
1252 false), /* pcrel_offset */
1253
1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1255 12, /* rightshift */
1256 4, /* size */
1257 21, /* bitsize */
1258 false, /* pc_relative */
1259 0, /* bitpos */
1260 complain_overflow_dont, /* complain_on_overflow */
1261 bfd_elf_generic_reloc, /* special_function */
1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1263 false, /* partial_inplace */
1264 0, /* src_mask */
1265 0x1fffff, /* dst_mask */
1266 false), /* pcrel_offset */
1267
1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1269 3, /* rightshift */
1270 4, /* size */
1271 12, /* bitsize */
1272 false, /* pc_relative */
1273 0, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1277 false, /* partial_inplace */
1278 0, /* src_mask */
1279 0xff8, /* dst_mask */
1280 false), /* pcrel_offset */
1281
1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1283 2, /* rightshift */
1284 4, /* size */
1285 12, /* bitsize */
1286 false, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1291 false, /* partial_inplace */
1292 0, /* src_mask */
1293 0xffc, /* dst_mask */
1294 false), /* pcrel_offset */
1295
1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1297 2, /* rightshift */
1298 4, /* size */
1299 19, /* bitsize */
1300 false, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1305 false, /* partial_inplace */
1306 0, /* src_mask */
1307 0x1ffffc, /* dst_mask */
1308 false), /* pcrel_offset */
1309
1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1311 0, /* rightshift */
1312 4, /* size */
1313 16, /* bitsize */
1314 false, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1319 false, /* partial_inplace */
1320 0, /* src_mask */
1321 0xffff, /* dst_mask */
1322 false), /* pcrel_offset */
1323
1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1325 16, /* rightshift */
1326 4, /* size */
1327 16, /* bitsize */
1328 false, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_unsigned, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1333 false, /* partial_inplace */
1334 0, /* src_mask */
1335 0xffff, /* dst_mask */
1336 false), /* pcrel_offset */
1337
1338 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1340 12, /* rightshift */
1341 4, /* size */
1342 12, /* bitsize */
1343 false, /* pc_relative */
1344 0, /* bitpos */
1345 complain_overflow_unsigned, /* complain_on_overflow */
1346 bfd_elf_generic_reloc, /* special_function */
1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1348 false, /* partial_inplace */
1349 0, /* src_mask */
1350 0xfff, /* dst_mask */
1351 false), /* pcrel_offset */
1352
1353 /* Unsigned 12 bit byte offset to module TLS base address. */
1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1355 0, /* rightshift */
1356 4, /* size */
1357 12, /* bitsize */
1358 false, /* pc_relative */
1359 0, /* bitpos */
1360 complain_overflow_unsigned, /* complain_on_overflow */
1361 bfd_elf_generic_reloc, /* special_function */
1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1363 false, /* partial_inplace */
1364 0, /* src_mask */
1365 0xfff, /* dst_mask */
1366 false), /* pcrel_offset */
1367
1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1370 0, /* rightshift */
1371 4, /* size */
1372 12, /* bitsize */
1373 false, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1378 false, /* partial_inplace */
1379 0, /* src_mask */
1380 0xfff, /* dst_mask */
1381 false), /* pcrel_offset */
1382
1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1385 0, /* rightshift */
1386 4, /* size */
1387 12, /* bitsize */
1388 false, /* pc_relative */
1389 0, /* bitpos */
1390 complain_overflow_dont, /* complain_on_overflow */
1391 bfd_elf_generic_reloc, /* special_function */
1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1393 false, /* partial_inplace */
1394 0, /* src_mask */
1395 0xfff, /* dst_mask */
1396 false), /* pcrel_offset */
1397
1398 /* Get to the page for the GOT entry for the symbol
1399 (G(S) - P) using an ADRP instruction. */
1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1401 12, /* rightshift */
1402 4, /* size */
1403 21, /* bitsize */
1404 true, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_signed, /* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1409 false, /* partial_inplace */
1410 0, /* src_mask */
1411 0x1fffff, /* dst_mask */
1412 true), /* pcrel_offset */
1413
1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1415 0, /* rightshift */
1416 4, /* size */
1417 21, /* bitsize */
1418 true, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_signed, /* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1423 false, /* partial_inplace */
1424 0, /* src_mask */
1425 0x1fffff, /* dst_mask */
1426 true), /* pcrel_offset */
1427
1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1430 1, /* rightshift */
1431 4, /* size */
1432 11, /* bitsize */
1433 false, /* pc_relative */
1434 10, /* bitpos */
1435 complain_overflow_unsigned, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1438 false, /* partial_inplace */
1439 0, /* src_mask */
1440 0x1ffc00, /* dst_mask */
1441 false), /* pcrel_offset */
1442
1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1445 1, /* rightshift */
1446 4, /* size */
1447 11, /* bitsize */
1448 false, /* pc_relative */
1449 10, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1453 false, /* partial_inplace */
1454 0, /* src_mask */
1455 0x1ffc00, /* dst_mask */
1456 false), /* pcrel_offset */
1457
1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1460 2, /* rightshift */
1461 4, /* size */
1462 10, /* bitsize */
1463 false, /* pc_relative */
1464 10, /* bitpos */
1465 complain_overflow_unsigned, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1468 false, /* partial_inplace */
1469 0, /* src_mask */
1470 0x3ffc00, /* dst_mask */
1471 false), /* pcrel_offset */
1472
1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1475 2, /* rightshift */
1476 4, /* size */
1477 10, /* bitsize */
1478 false, /* pc_relative */
1479 10, /* bitpos */
1480 complain_overflow_dont, /* complain_on_overflow */
1481 bfd_elf_generic_reloc, /* special_function */
1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1483 false, /* partial_inplace */
1484 0, /* src_mask */
1485 0xffc00, /* dst_mask */
1486 false), /* pcrel_offset */
1487
1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1490 3, /* rightshift */
1491 4, /* size */
1492 9, /* bitsize */
1493 false, /* pc_relative */
1494 10, /* bitpos */
1495 complain_overflow_unsigned, /* complain_on_overflow */
1496 bfd_elf_generic_reloc, /* special_function */
1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1498 false, /* partial_inplace */
1499 0, /* src_mask */
1500 0x3ffc00, /* dst_mask */
1501 false), /* pcrel_offset */
1502
1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1505 3, /* rightshift */
1506 4, /* size */
1507 9, /* bitsize */
1508 false, /* pc_relative */
1509 10, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1513 false, /* partial_inplace */
1514 0, /* src_mask */
1515 0x7fc00, /* dst_mask */
1516 false), /* pcrel_offset */
1517
1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1520 0, /* rightshift */
1521 4, /* size */
1522 12, /* bitsize */
1523 false, /* pc_relative */
1524 10, /* bitpos */
1525 complain_overflow_unsigned, /* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1528 false, /* partial_inplace */
1529 0, /* src_mask */
1530 0x3ffc00, /* dst_mask */
1531 false), /* pcrel_offset */
1532
1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1535 0, /* rightshift */
1536 4, /* size */
1537 12, /* bitsize */
1538 false, /* pc_relative */
1539 10, /* bitpos */
1540 complain_overflow_dont, /* complain_on_overflow */
1541 bfd_elf_generic_reloc, /* special_function */
1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1543 false, /* partial_inplace */
1544 0, /* src_mask */
1545 0x3ffc00, /* dst_mask */
1546 false), /* pcrel_offset */
1547
1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1550 0, /* rightshift */
1551 4, /* size */
1552 16, /* bitsize */
1553 false, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_unsigned, /* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1558 false, /* partial_inplace */
1559 0, /* src_mask */
1560 0xffff, /* dst_mask */
1561 false), /* pcrel_offset */
1562
1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1565 0, /* rightshift */
1566 4, /* size */
1567 16, /* bitsize */
1568 false, /* pc_relative */
1569 0, /* bitpos */
1570 complain_overflow_dont, /* complain_on_overflow */
1571 bfd_elf_generic_reloc, /* special_function */
1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1573 false, /* partial_inplace */
1574 0, /* src_mask */
1575 0xffff, /* dst_mask */
1576 false), /* pcrel_offset */
1577
1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1580 16, /* rightshift */
1581 4, /* size */
1582 16, /* bitsize */
1583 false, /* pc_relative */
1584 0, /* bitpos */
1585 complain_overflow_unsigned, /* complain_on_overflow */
1586 bfd_elf_generic_reloc, /* special_function */
1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1588 false, /* partial_inplace */
1589 0, /* src_mask */
1590 0xffff, /* dst_mask */
1591 false), /* pcrel_offset */
1592
1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1595 16, /* rightshift */
1596 4, /* size */
1597 16, /* bitsize */
1598 false, /* pc_relative */
1599 0, /* bitpos */
1600 complain_overflow_dont, /* complain_on_overflow */
1601 bfd_elf_generic_reloc, /* special_function */
1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1603 false, /* partial_inplace */
1604 0, /* src_mask */
1605 0xffff, /* dst_mask */
1606 false), /* pcrel_offset */
1607
1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1610 32, /* rightshift */
1611 4, /* size */
1612 16, /* bitsize */
1613 false, /* pc_relative */
1614 0, /* bitpos */
1615 complain_overflow_unsigned, /* complain_on_overflow */
1616 bfd_elf_generic_reloc, /* special_function */
1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1618 false, /* partial_inplace */
1619 0, /* src_mask */
1620 0xffff, /* dst_mask */
1621 false), /* pcrel_offset */
1622
1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1624 32, /* rightshift */
1625 4, /* size */
1626 16, /* bitsize */
1627 false, /* pc_relative */
1628 0, /* bitpos */
1629 complain_overflow_unsigned, /* complain_on_overflow */
1630 bfd_elf_generic_reloc, /* special_function */
1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1632 false, /* partial_inplace */
1633 0, /* src_mask */
1634 0xffff, /* dst_mask */
1635 false), /* pcrel_offset */
1636
1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1638 16, /* rightshift */
1639 4, /* size */
1640 16, /* bitsize */
1641 false, /* pc_relative */
1642 0, /* bitpos */
1643 complain_overflow_dont, /* complain_on_overflow */
1644 bfd_elf_generic_reloc, /* special_function */
1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1646 false, /* partial_inplace */
1647 0, /* src_mask */
1648 0xffff, /* dst_mask */
1649 false), /* pcrel_offset */
1650
1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1652 16, /* rightshift */
1653 4, /* size */
1654 16, /* bitsize */
1655 false, /* pc_relative */
1656 0, /* bitpos */
1657 complain_overflow_dont, /* complain_on_overflow */
1658 bfd_elf_generic_reloc, /* special_function */
1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1660 false, /* partial_inplace */
1661 0, /* src_mask */
1662 0xffff, /* dst_mask */
1663 false), /* pcrel_offset */
1664
1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1666 0, /* rightshift */
1667 4, /* size */
1668 16, /* bitsize */
1669 false, /* pc_relative */
1670 0, /* bitpos */
1671 complain_overflow_dont, /* complain_on_overflow */
1672 bfd_elf_generic_reloc, /* special_function */
1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1674 false, /* partial_inplace */
1675 0, /* src_mask */
1676 0xffff, /* dst_mask */
1677 false), /* pcrel_offset */
1678
1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1680 0, /* rightshift */
1681 4, /* size */
1682 16, /* bitsize */
1683 false, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_dont, /* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1688 false, /* partial_inplace */
1689 0, /* src_mask */
1690 0xffff, /* dst_mask */
1691 false), /* pcrel_offset */
1692
1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1694 12, /* rightshift */
1695 4, /* size */
1696 12, /* bitsize */
1697 false, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_unsigned, /* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1702 false, /* partial_inplace */
1703 0, /* src_mask */
1704 0xfff, /* dst_mask */
1705 false), /* pcrel_offset */
1706
1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1708 0, /* rightshift */
1709 4, /* size */
1710 12, /* bitsize */
1711 false, /* pc_relative */
1712 0, /* bitpos */
1713 complain_overflow_unsigned, /* complain_on_overflow */
1714 bfd_elf_generic_reloc, /* special_function */
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1716 false, /* partial_inplace */
1717 0, /* src_mask */
1718 0xfff, /* dst_mask */
1719 false), /* pcrel_offset */
1720
1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1722 0, /* rightshift */
1723 4, /* size */
1724 12, /* bitsize */
1725 false, /* pc_relative */
1726 0, /* bitpos */
1727 complain_overflow_dont, /* complain_on_overflow */
1728 bfd_elf_generic_reloc, /* special_function */
1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1730 false, /* partial_inplace */
1731 0, /* src_mask */
1732 0xfff, /* dst_mask */
1733 false), /* pcrel_offset */
1734
1735 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
1737 1, /* rightshift */
1738 4, /* size */
1739 11, /* bitsize */
1740 false, /* pc_relative */
1741 10, /* bitpos */
1742 complain_overflow_unsigned, /* complain_on_overflow */
1743 bfd_elf_generic_reloc, /* special_function */
1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
1745 false, /* partial_inplace */
1746 0, /* src_mask */
1747 0x1ffc00, /* dst_mask */
1748 false), /* pcrel_offset */
1749
1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
1752 1, /* rightshift */
1753 4, /* size */
1754 11, /* bitsize */
1755 false, /* pc_relative */
1756 10, /* bitpos */
1757 complain_overflow_dont, /* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
1760 false, /* partial_inplace */
1761 0, /* src_mask */
1762 0x1ffc00, /* dst_mask */
1763 false), /* pcrel_offset */
1764
1765 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
1767 2, /* rightshift */
1768 4, /* size */
1769 10, /* bitsize */
1770 false, /* pc_relative */
1771 10, /* bitpos */
1772 complain_overflow_unsigned, /* complain_on_overflow */
1773 bfd_elf_generic_reloc, /* special_function */
1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
1775 false, /* partial_inplace */
1776 0, /* src_mask */
1777 0xffc00, /* dst_mask */
1778 false), /* pcrel_offset */
1779
1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
1782 2, /* rightshift */
1783 4, /* size */
1784 10, /* bitsize */
1785 false, /* pc_relative */
1786 10, /* bitpos */
1787 complain_overflow_dont, /* complain_on_overflow */
1788 bfd_elf_generic_reloc, /* special_function */
1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
1790 false, /* partial_inplace */
1791 0, /* src_mask */
1792 0xffc00, /* dst_mask */
1793 false), /* pcrel_offset */
1794
1795 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
1797 3, /* rightshift */
1798 4, /* size */
1799 9, /* bitsize */
1800 false, /* pc_relative */
1801 10, /* bitpos */
1802 complain_overflow_unsigned, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
1805 false, /* partial_inplace */
1806 0, /* src_mask */
1807 0x7fc00, /* dst_mask */
1808 false), /* pcrel_offset */
1809
1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
1812 3, /* rightshift */
1813 4, /* size */
1814 9, /* bitsize */
1815 false, /* pc_relative */
1816 10, /* bitpos */
1817 complain_overflow_dont, /* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
1820 false, /* partial_inplace */
1821 0, /* src_mask */
1822 0x7fc00, /* dst_mask */
1823 false), /* pcrel_offset */
1824
1825 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
1827 0, /* rightshift */
1828 4, /* size */
1829 12, /* bitsize */
1830 false, /* pc_relative */
1831 10, /* bitpos */
1832 complain_overflow_unsigned, /* complain_on_overflow */
1833 bfd_elf_generic_reloc, /* special_function */
1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
1835 false, /* partial_inplace */
1836 0, /* src_mask */
1837 0x3ffc00, /* dst_mask */
1838 false), /* pcrel_offset */
1839
1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
1842 0, /* rightshift */
1843 4, /* size */
1844 12, /* bitsize */
1845 false, /* pc_relative */
1846 10, /* bitpos */
1847 complain_overflow_dont, /* complain_on_overflow */
1848 bfd_elf_generic_reloc, /* special_function */
1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
1850 false, /* partial_inplace */
1851 0, /* src_mask */
1852 0x3ffc00, /* dst_mask */
1853 false), /* pcrel_offset */
1854
1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1856 2, /* rightshift */
1857 4, /* size */
1858 19, /* bitsize */
1859 true, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_dont, /* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1864 false, /* partial_inplace */
1865 0, /* src_mask */
1866 0x0ffffe0, /* dst_mask */
1867 true), /* pcrel_offset */
1868
1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1870 0, /* rightshift */
1871 4, /* size */
1872 21, /* bitsize */
1873 true, /* pc_relative */
1874 0, /* bitpos */
1875 complain_overflow_dont, /* complain_on_overflow */
1876 bfd_elf_generic_reloc, /* special_function */
1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1878 false, /* partial_inplace */
1879 0, /* src_mask */
1880 0x1fffff, /* dst_mask */
1881 true), /* pcrel_offset */
1882
1883 /* Get to the page for the GOT entry for the symbol
1884 (G(S) - P) using an ADRP instruction. */
1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1886 12, /* rightshift */
1887 4, /* size */
1888 21, /* bitsize */
1889 true, /* pc_relative */
1890 0, /* bitpos */
1891 complain_overflow_dont, /* complain_on_overflow */
1892 bfd_elf_generic_reloc, /* special_function */
1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1894 false, /* partial_inplace */
1895 0, /* src_mask */
1896 0x1fffff, /* dst_mask */
1897 true), /* pcrel_offset */
1898
1899 /* LD64: GOT offset G(S) & 0xff8. */
1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
1901 3, /* rightshift */
1902 4, /* size */
1903 12, /* bitsize */
1904 false, /* pc_relative */
1905 0, /* bitpos */
1906 complain_overflow_dont, /* complain_on_overflow */
1907 bfd_elf_generic_reloc, /* special_function */
1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
1909 false, /* partial_inplace */
1910 0, /* src_mask */
1911 0xff8, /* dst_mask */
1912 false), /* pcrel_offset */
1913
1914 /* LD32: GOT offset G(S) & 0xffc. */
1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1916 2, /* rightshift */
1917 4, /* size */
1918 12, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont, /* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1924 false, /* partial_inplace */
1925 0, /* src_mask */
1926 0xffc, /* dst_mask */
1927 false), /* pcrel_offset */
1928
1929 /* ADD: GOT offset G(S) & 0xfff. */
1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
1931 0, /* rightshift */
1932 4, /* size */
1933 12, /* bitsize */
1934 false, /* pc_relative */
1935 0, /* bitpos */
1936 complain_overflow_dont,/* complain_on_overflow */
1937 bfd_elf_generic_reloc, /* special_function */
1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
1939 false, /* partial_inplace */
1940 0, /* src_mask */
1941 0xfff, /* dst_mask */
1942 false), /* pcrel_offset */
1943
1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1945 16, /* rightshift */
1946 4, /* size */
1947 12, /* bitsize */
1948 false, /* pc_relative */
1949 0, /* bitpos */
1950 complain_overflow_unsigned, /* complain_on_overflow */
1951 bfd_elf_generic_reloc, /* special_function */
1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1953 false, /* partial_inplace */
1954 0, /* src_mask */
1955 0xffff, /* dst_mask */
1956 false), /* pcrel_offset */
1957
1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1959 0, /* rightshift */
1960 4, /* size */
1961 12, /* bitsize */
1962 false, /* pc_relative */
1963 0, /* bitpos */
1964 complain_overflow_dont, /* complain_on_overflow */
1965 bfd_elf_generic_reloc, /* special_function */
1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1967 false, /* partial_inplace */
1968 0, /* src_mask */
1969 0xffff, /* dst_mask */
1970 false), /* pcrel_offset */
1971
1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1973 0, /* rightshift */
1974 4, /* size */
1975 12, /* bitsize */
1976 false, /* pc_relative */
1977 0, /* bitpos */
1978 complain_overflow_dont, /* complain_on_overflow */
1979 bfd_elf_generic_reloc, /* special_function */
1980 AARCH64_R_STR (TLSDESC_LDR), /* name */
1981 false, /* partial_inplace */
1982 0x0, /* src_mask */
1983 0x0, /* dst_mask */
1984 false), /* pcrel_offset */
1985
1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1987 0, /* rightshift */
1988 4, /* size */
1989 12, /* bitsize */
1990 false, /* pc_relative */
1991 0, /* bitpos */
1992 complain_overflow_dont, /* complain_on_overflow */
1993 bfd_elf_generic_reloc, /* special_function */
1994 AARCH64_R_STR (TLSDESC_ADD), /* name */
1995 false, /* partial_inplace */
1996 0x0, /* src_mask */
1997 0x0, /* dst_mask */
1998 false), /* pcrel_offset */
1999
2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
2001 0, /* rightshift */
2002 4, /* size */
2003 0, /* bitsize */
2004 false, /* pc_relative */
2005 0, /* bitpos */
2006 complain_overflow_dont, /* complain_on_overflow */
2007 bfd_elf_generic_reloc, /* special_function */
2008 AARCH64_R_STR (TLSDESC_CALL), /* name */
2009 false, /* partial_inplace */
2010 0x0, /* src_mask */
2011 0x0, /* dst_mask */
2012 false), /* pcrel_offset */
2013
2014 HOWTO (AARCH64_R (COPY), /* type */
2015 0, /* rightshift */
2016 4, /* size */
2017 64, /* bitsize */
2018 false, /* pc_relative */
2019 0, /* bitpos */
2020 complain_overflow_bitfield, /* complain_on_overflow */
2021 bfd_elf_generic_reloc, /* special_function */
2022 AARCH64_R_STR (COPY), /* name */
2023 true, /* partial_inplace */
2024 0, /* src_mask */
2025 0xffffffff, /* dst_mask */
2026 false), /* pcrel_offset */
2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */
2029 0, /* rightshift */
2030 4, /* size */
2031 64, /* bitsize */
2032 false, /* pc_relative */
2033 0, /* bitpos */
2034 complain_overflow_bitfield, /* complain_on_overflow */
2035 bfd_elf_generic_reloc, /* special_function */
2036 AARCH64_R_STR (GLOB_DAT), /* name */
2037 true, /* partial_inplace */
2038 0, /* src_mask */
2039 0xffffffff, /* dst_mask */
2040 false), /* pcrel_offset */
2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
2043 0, /* rightshift */
2044 4, /* size */
2045 64, /* bitsize */
2046 false, /* pc_relative */
2047 0, /* bitpos */
2048 complain_overflow_bitfield, /* complain_on_overflow */
2049 bfd_elf_generic_reloc, /* special_function */
2050 AARCH64_R_STR (JUMP_SLOT), /* name */
2051 true, /* partial_inplace */
2052 0, /* src_mask */
2053 0xffffffff, /* dst_mask */
2054 false), /* pcrel_offset */
2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */
2057 0, /* rightshift */
2058 4, /* size */
2059 64, /* bitsize */
2060 false, /* pc_relative */
2061 0, /* bitpos */
2062 complain_overflow_bitfield, /* complain_on_overflow */
2063 bfd_elf_generic_reloc, /* special_function */
2064 AARCH64_R_STR (RELATIVE), /* name */
2065 true, /* partial_inplace */
2066 0, /* src_mask */
2067 ALL_ONES, /* dst_mask */
2068 false), /* pcrel_offset */
2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
2071 0, /* rightshift */
2072 4, /* size */
2073 64, /* bitsize */
2074 false, /* pc_relative */
2075 0, /* bitpos */
2076 complain_overflow_dont, /* complain_on_overflow */
2077 bfd_elf_generic_reloc, /* special_function */
2078 #if ARCH_SIZE == 64
2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */
2080 #else
2081 AARCH64_R_STR (TLS_DTPMOD), /* name */
2082 #endif
2083 false, /* partial_inplace */
2084 0, /* src_mask */
2085 ALL_ONES, /* dst_mask */
2086 false), /* pcrel_offset */
2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
2089 0, /* rightshift */
2090 4, /* size */
2091 64, /* bitsize */
2092 false, /* pc_relative */
2093 0, /* bitpos */
2094 complain_overflow_dont, /* complain_on_overflow */
2095 bfd_elf_generic_reloc, /* special_function */
2096 #if ARCH_SIZE == 64
2097 AARCH64_R_STR (TLS_DTPREL64), /* name */
2098 #else
2099 AARCH64_R_STR (TLS_DTPREL), /* name */
2100 #endif
2101 false, /* partial_inplace */
2102 0, /* src_mask */
2103 ALL_ONES, /* dst_mask */
2104 false), /* pcrel_offset */
2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */
2107 0, /* rightshift */
2108 4, /* size */
2109 64, /* bitsize */
2110 false, /* pc_relative */
2111 0, /* bitpos */
2112 complain_overflow_dont, /* complain_on_overflow */
2113 bfd_elf_generic_reloc, /* special_function */
2114 #if ARCH_SIZE == 64
2115 AARCH64_R_STR (TLS_TPREL64), /* name */
2116 #else
2117 AARCH64_R_STR (TLS_TPREL), /* name */
2118 #endif
2119 false, /* partial_inplace */
2120 0, /* src_mask */
2121 ALL_ONES, /* dst_mask */
2122 false), /* pcrel_offset */
2123
2124 HOWTO (AARCH64_R (TLSDESC), /* type */
2125 0, /* rightshift */
2126 4, /* size */
2127 64, /* bitsize */
2128 false, /* pc_relative */
2129 0, /* bitpos */
2130 complain_overflow_dont, /* complain_on_overflow */
2131 bfd_elf_generic_reloc, /* special_function */
2132 AARCH64_R_STR (TLSDESC), /* name */
2133 false, /* partial_inplace */
2134 0, /* src_mask */
2135 ALL_ONES, /* dst_mask */
2136 false), /* pcrel_offset */
2137
2138 HOWTO (AARCH64_R (IRELATIVE), /* type */
2139 0, /* rightshift */
2140 4, /* size */
2141 64, /* bitsize */
2142 false, /* pc_relative */
2143 0, /* bitpos */
2144 complain_overflow_bitfield, /* complain_on_overflow */
2145 bfd_elf_generic_reloc, /* special_function */
2146 AARCH64_R_STR (IRELATIVE), /* name */
2147 false, /* partial_inplace */
2148 0, /* src_mask */
2149 ALL_ONES, /* dst_mask */
2150 false), /* pcrel_offset */
2151
2152 EMPTY_HOWTO (0),
2153 };
2154
2155 static reloc_howto_type elfNN_aarch64_howto_none =
2156 HOWTO (R_AARCH64_NONE, /* type */
2157 0, /* rightshift */
2158 0, /* size */
2159 0, /* bitsize */
2160 false, /* pc_relative */
2161 0, /* bitpos */
2162 complain_overflow_dont,/* complain_on_overflow */
2163 bfd_elf_generic_reloc, /* special_function */
2164 "R_AARCH64_NONE", /* name */
2165 false, /* partial_inplace */
2166 0, /* src_mask */
2167 0, /* dst_mask */
2168 false); /* pcrel_offset */
2169
2170 /* Given HOWTO, return the bfd internal relocation enumerator. */
2171
2172 static bfd_reloc_code_real_type
2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
2174 {
2175 const int size
2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
2177 const ptrdiff_t offset
2178 = howto - elfNN_aarch64_howto_table;
2179
2180 if (offset > 0 && offset < size - 1)
2181 return BFD_RELOC_AARCH64_RELOC_START + offset;
2182
2183 if (howto == &elfNN_aarch64_howto_none)
2184 return BFD_RELOC_AARCH64_NONE;
2185
2186 return BFD_RELOC_AARCH64_RELOC_START;
2187 }
2188
2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */
2190
2191 static bfd_reloc_code_real_type
2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
2193 {
2194 static bool initialized_p = false;
2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */
2196 static unsigned int offsets[R_AARCH64_end];
2197
2198 if (!initialized_p)
2199 {
2200 unsigned int i;
2201
2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2203 if (elfNN_aarch64_howto_table[i].type != 0)
2204 offsets[elfNN_aarch64_howto_table[i].type] = i;
2205
2206 initialized_p = true;
2207 }
2208
2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
2210 return BFD_RELOC_AARCH64_NONE;
2211
2212 /* PR 17512: file: b371e70a. */
2213 if (r_type >= R_AARCH64_end)
2214 {
2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2216 abfd, r_type);
2217 bfd_set_error (bfd_error_bad_value);
2218 return BFD_RELOC_AARCH64_NONE;
2219 }
2220
2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
2222 }
2223
2224 struct elf_aarch64_reloc_map
2225 {
2226 bfd_reloc_code_real_type from;
2227 bfd_reloc_code_real_type to;
2228 };
2229
2230 /* Map bfd generic reloc to AArch64-specific reloc. */
2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
2232 {
2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
2234
2235 /* Basic data relocations. */
2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
2243 };
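/* For example, a generic BFD_RELOC_32 produced by the assembler is first
   mapped by this table to BFD_RELOC_AARCH64_32 and only then looked up in
   elfNN_aarch64_howto_table, so

     elfNN_aarch64_howto_from_bfd_reloc (BFD_RELOC_32)

   and

     elfNN_aarch64_howto_from_bfd_reloc (BFD_RELOC_AARCH64_32)

   return the same howto entry.  (Illustration of the lookup below only.)  */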
2244
2245 /* Given the bfd internal relocation enumerator in CODE, return the
2246 corresponding howto entry. */
2247
2248 static reloc_howto_type *
2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
2250 {
2251 unsigned int i;
2252
2253 /* Convert bfd generic reloc to AArch64-specific reloc. */
2254 if (code < BFD_RELOC_AARCH64_RELOC_START
2255 || code > BFD_RELOC_AARCH64_RELOC_END)
2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
2257 if (elf_aarch64_reloc_map[i].from == code)
2258 {
2259 code = elf_aarch64_reloc_map[i].to;
2260 break;
2261 }
2262
2263 if (code > BFD_RELOC_AARCH64_RELOC_START
2264 && code < BFD_RELOC_AARCH64_RELOC_END)
2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
2267
2268 if (code == BFD_RELOC_AARCH64_NONE)
2269 return &elfNN_aarch64_howto_none;
2270
2271 return NULL;
2272 }
2273
2274 static reloc_howto_type *
2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
2276 {
2277 bfd_reloc_code_real_type val;
2278 reloc_howto_type *howto;
2279
2280 #if ARCH_SIZE == 32
2281 if (r_type > 256)
2282 {
2283 bfd_set_error (bfd_error_bad_value);
2284 return NULL;
2285 }
2286 #endif
2287
2288 if (r_type == R_AARCH64_NONE)
2289 return &elfNN_aarch64_howto_none;
2290
2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
2293
2294 if (howto != NULL)
2295 return howto;
2296
2297 bfd_set_error (bfd_error_bad_value);
2298 return NULL;
2299 }
2300
2301 static bool
2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
2303 Elf_Internal_Rela *elf_reloc)
2304 {
2305 unsigned int r_type;
2306
2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
2309
2310 if (bfd_reloc->howto == NULL)
2311 {
2312 /* xgettext:c-format */
2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
2314 return false;
2315 }
2316 return true;
2317 }
2318
2319 static reloc_howto_type *
2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2321 bfd_reloc_code_real_type code)
2322 {
2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
2324
2325 if (howto != NULL)
2326 return howto;
2327
2328 bfd_set_error (bfd_error_bad_value);
2329 return NULL;
2330 }
2331
2332 static reloc_howto_type *
2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2334 const char *r_name)
2335 {
2336 unsigned int i;
2337
2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2339 if (elfNN_aarch64_howto_table[i].name != NULL
2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2341 return &elfNN_aarch64_howto_table[i];
2342
2343 return NULL;
2344 }
2345
2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2349 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2350
2351 /* The linker script knows the section names for placement.
2352 The entry_names are used to do simple name mangling on the stubs.
2353 Given a function name, and its type, the stub can be found. The
2354 name can be changed. The only requirement is that the %s be present. */
2355 #define STUB_ENTRY_NAME "__%s_veneer"
2356
2357 /* Stub name for a BTI landing stub. */
2358 #define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"
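/* For example, a range-extension veneer generated for calls to printf gets
   the name "__printf_veneer", and a BTI landing stub for the same symbol
   gets "__printf_bti_veneer".  */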
2359
2360 /* The name of the dynamic interpreter. This is put in the .interp
2361 section. */
2362 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2363
2364 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2365 (((1 << 25) - 1) << 2)
2366 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2367 (-((1 << 25) << 2))
2368
2369 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2370 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
2371
2372 static int
2373 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2374 {
2375 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2376 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2377 }
2378
2379 static int
2380 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2381 {
2382 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2383 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2384 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2385 }
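/* Worked example for the two checks above: a direct B/BL encodes a 26-bit
   signed word offset, giving a reach of ((1 << 25) - 1) << 2 = 0x7fffffc
   bytes forwards and (1 << 25) << 2 = 0x8000000 bytes backwards, roughly
   +/-128MiB.  ADRP encodes a 21-bit signed page offset, so it reaches about
   +/-4GiB; e.g. VALUE 0x12345678 against PLACE 0x12000000 gives a page
   offset of 0x345, comfortably inside [-0x100000, 0xfffff].  */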
2386
2387 static const uint32_t aarch64_adrp_branch_stub [] =
2388 {
2389 0x90000010, /* adrp ip0, X */
2390 /* R_AARCH64_ADR_HI21_PCREL(X) */
2391 0x91000210, /* add ip0, ip0, :lo12:X */
2392 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2393 0xd61f0200, /* br ip0 */
2394 };
2395
2396 static const uint32_t aarch64_long_branch_stub[] =
2397 {
2398 #if ARCH_SIZE == 64
2399 0x58000090, /* ldr ip0, 1f */
2400 #else
2401 0x18000090, /* ldr wip0, 1f */
2402 #endif
2403 0x10000011, /* adr ip1, #0 */
2404 0x8b110210, /* add ip0, ip0, ip1 */
2405 0xd61f0200, /* br ip0 */
2406 0x00000000, /* 1: .xword or .word
2407 R_AARCH64_PRELNN(X) + 12
2408 */
2409 0x00000000,
2410 };
2411
2412 static const uint32_t aarch64_bti_direct_branch_stub[] =
2413 {
2414 0xd503245f, /* bti c */
2415 0x14000000, /* b <label> */
2416 };
2417
2418 static const uint32_t aarch64_erratum_835769_stub[] =
2419 {
2420 0x00000000, /* Placeholder for multiply accumulate. */
2421 0x14000000, /* b <label> */
2422 };
2423
2424 static const uint32_t aarch64_erratum_843419_stub[] =
2425 {
2426 0x00000000, /* Placeholder for LDR instruction. */
2427 0x14000000, /* b <label> */
2428 };
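/* In both erratum veneers the leading zero word is a placeholder: it is
   later replaced by the instruction that is moved out of line, and the
   trailing B is fixed up to branch back to the instruction following the
   original one (see aarch64_build_one_stub below).  */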
2429
2430 /* Section name for stubs is the associated section name plus this
2431 string. */
2432 #define STUB_SUFFIX ".stub"
2433
2434 enum elf_aarch64_stub_type
2435 {
2436 aarch64_stub_none,
2437 aarch64_stub_adrp_branch,
2438 aarch64_stub_long_branch,
2439 aarch64_stub_bti_direct_branch,
2440 aarch64_stub_erratum_835769_veneer,
2441 aarch64_stub_erratum_843419_veneer,
2442 };
2443
2444 struct elf_aarch64_stub_hash_entry
2445 {
2446 /* Base hash table entry structure. */
2447 struct bfd_hash_entry root;
2448
2449 /* The stub section. */
2450 asection *stub_sec;
2451
2452 /* Offset within stub_sec of the beginning of this stub. */
2453 bfd_vma stub_offset;
2454
2455 /* Given the symbol's value and its section we can determine its final
2456 value when building the stubs (so the stub knows where to jump). */
2457 bfd_vma target_value;
2458 asection *target_section;
2459
2460 enum elf_aarch64_stub_type stub_type;
2461
2462 /* The symbol table entry, if any, that this was derived from. */
2463 struct elf_aarch64_link_hash_entry *h;
2464
2465 /* Destination symbol type */
2466 unsigned char st_type;
2467
2468 /* The target is also a stub. */
2469 bool double_stub;
2470
2471 /* Where this stub is being called from, or, in the case of combined
2472 stub sections, the first input section in the group. */
2473 asection *id_sec;
2474
2475 /* The name for the local symbol at the start of this stub. The
2476 stub name in the hash table has to be unique; this does not, so
2477 it can be friendlier. */
2478 char *output_name;
2479
2480 /* The instruction which caused this stub to be generated (only valid for
2481 erratum 835769 workaround stubs at present). */
2482 uint32_t veneered_insn;
2483
2484 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2485 bfd_vma adrp_offset;
2486 };
2487
2488 /* Used to build a map of a section. This is required for mixed-endian
2489 code/data. */
2490
2491 typedef struct elf_elf_section_map
2492 {
2493 bfd_vma vma;
2494 char type;
2495 }
2496 elf_aarch64_section_map;
2497
2498
2499 typedef struct _aarch64_elf_section_data
2500 {
2501 struct bfd_elf_section_data elf;
2502 unsigned int mapcount;
2503 unsigned int mapsize;
2504 elf_aarch64_section_map *map;
2505 }
2506 _aarch64_elf_section_data;
2507
2508 #define elf_aarch64_section_data(sec) \
2509 ((_aarch64_elf_section_data *) elf_section_data (sec))
2510
2511 /* The size of the thread control block, which is defined to be two pointers. */
2512 #define TCB_SIZE ((ARCH_SIZE/8)*2)
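/* With ARCH_SIZE == 64 this is 16 bytes, with ARCH_SIZE == 32 it is 8 bytes.
   The thread pointer addresses this block, and the static TLS data follows
   it (rounded up to the TLS segment alignment), which is why local-exec
   TPREL offsets are biased by it.  */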
2513
2514 struct elf_aarch64_local_symbol
2515 {
2516 unsigned int got_type;
2517 bfd_signed_vma got_refcount;
2518 bfd_vma got_offset;
2519
2520 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2521 offset is from the end of the jump table and reserved entries
2522 within the PLTGOT.
2523
2524 The magic value (bfd_vma) -1 indicates that an offset has not been
2525 allocated. */
2526 bfd_vma tlsdesc_got_jump_table_offset;
2527 };
2528
2529 struct elf_aarch64_obj_tdata
2530 {
2531 struct elf_obj_tdata root;
2532
2533 /* local symbol descriptors */
2534 struct elf_aarch64_local_symbol *locals;
2535
2536 /* Zero to warn when linking objects with incompatible enum sizes. */
2537 int no_enum_size_warning;
2538
2539 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2540 int no_wchar_size_warning;
2541
2542 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
2543 uint32_t gnu_and_prop;
2544
2545 /* Zero to warn when linking objects with incompatible
2546 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
2547 int no_bti_warn;
2548
2549 /* PLT type based on security. */
2550 aarch64_plt_type plt_type;
2551 };
2552
2553 #define elf_aarch64_tdata(bfd) \
2554 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2555
2556 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2557
2558 #define is_aarch64_elf(bfd) \
2559 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2560 && elf_tdata (bfd) != NULL \
2561 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2562
2563 static bool
2564 elfNN_aarch64_mkobject (bfd *abfd)
2565 {
2566 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2567 AARCH64_ELF_DATA);
2568 }
2569
2570 #define elf_aarch64_hash_entry(ent) \
2571 ((struct elf_aarch64_link_hash_entry *)(ent))
2572
2573 #define GOT_UNKNOWN 0
2574 #define GOT_NORMAL 1
2575 #define GOT_TLS_GD 2
2576 #define GOT_TLS_IE 4
2577 #define GOT_TLSDESC_GD 8
2578
2579 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
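/* got_type is a bit mask, so a symbol accessed both through a TLS descriptor
   and through the initial-exec model ends up with
   got_type == (GOT_TLSDESC_GD | GOT_TLS_IE), and GOT_TLS_GD_ANY_P is true
   for any symbol that needs a general-dynamic style GOT slot, whether
   traditional or descriptor based.  */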
2580
2581 /* AArch64 ELF linker hash entry. */
2582 struct elf_aarch64_link_hash_entry
2583 {
2584 struct elf_link_hash_entry root;
2585
2586 /* Since PLT entries have variable size, we need to record the
2587 index into .got.plt instead of recomputing it from the PLT
2588 offset. */
2589 bfd_signed_vma plt_got_offset;
2590
2591 /* Bit mask representing the type of GOT entry(s), if any, required by
2592 this symbol. */
2593 unsigned int got_type;
2594
2595 /* TRUE if symbol is defined as a protected symbol. */
2596 unsigned int def_protected : 1;
2597
2598 /* A pointer to the most recently used stub hash entry against this
2599 symbol. */
2600 struct elf_aarch64_stub_hash_entry *stub_cache;
2601
2602 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2603 is from the end of the jump table and reserved entries within the PLTGOT.
2604
2605 The magic value (bfd_vma) -1 indicates that an offset has not
2606 been allocated. */
2607 bfd_vma tlsdesc_got_jump_table_offset;
2608 };
2609
2610 static unsigned int
2611 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2612 bfd *abfd,
2613 unsigned long r_symndx)
2614 {
2615 if (h)
2616 return elf_aarch64_hash_entry (h)->got_type;
2617
2618 if (! elf_aarch64_locals (abfd))
2619 return GOT_UNKNOWN;
2620
2621 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2622 }
2623
2624 /* Get the AArch64 elf linker hash table from a link_info structure. */
2625 #define elf_aarch64_hash_table(info) \
2626 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2627
2628 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2629 ((struct elf_aarch64_stub_hash_entry *) \
2630 bfd_hash_lookup ((table), (string), (create), (copy)))
2631
2632 /* AArch64 ELF linker hash table. */
2633 struct elf_aarch64_link_hash_table
2634 {
2635 /* The main hash table. */
2636 struct elf_link_hash_table root;
2637
2638 /* Nonzero to force PIC branch veneers. */
2639 int pic_veneer;
2640
2641 /* Fix erratum 835769. */
2642 int fix_erratum_835769;
2643
2644 /* Fix erratum 843419. */
2645 erratum_84319_opts fix_erratum_843419;
2646
2647 /* Don't apply link-time values for dynamic relocations. */
2648 int no_apply_dynamic_relocs;
2649
2650 /* The number of bytes in the initial entry in the PLT. */
2651 bfd_size_type plt_header_size;
2652
2653 /* The bytes of the initial PLT entry. */
2654 const bfd_byte *plt0_entry;
2655
2656 /* The number of bytes in the subsequent PLT entries. */
2657 bfd_size_type plt_entry_size;
2658
2659 /* The bytes of the subsequent PLT entry. */
2660 const bfd_byte *plt_entry;
2661
2662 /* For convenience in allocate_dynrelocs. */
2663 bfd *obfd;
2664
2665 /* The amount of space used by the reserved portion of the sgotplt
2666 section, plus whatever space is used by the jump slots. */
2667 bfd_vma sgotplt_jump_table_size;
2668
2669 /* The stub hash table. */
2670 struct bfd_hash_table stub_hash_table;
2671
2672 /* Linker stub bfd. */
2673 bfd *stub_bfd;
2674
2675 /* Linker call-backs. */
2676 asection *(*add_stub_section) (const char *, asection *);
2677 void (*layout_sections_again) (void);
2678
2679 /* Array to keep track of which stub sections have been created, and
2680 information on stub grouping. */
2681 struct map_stub
2682 {
2683 /* This is the section to which stubs in the group will be
2684 attached. */
2685 asection *link_sec;
2686 /* The stub section. */
2687 asection *stub_sec;
2688 } *stub_group;
2689
2690 /* Assorted information used by elfNN_aarch64_size_stubs. */
2691 unsigned int bfd_count;
2692 unsigned int top_index;
2693 asection **input_list;
2694
2695 /* True when two stubs are added where one targets the other; this happens
2696 when BTI stubs are inserted, and then the stub layout must not change
2697 during elfNN_aarch64_build_stubs. */
2698 bool has_double_stub;
2699
2700 /* JUMP_SLOT relocs for variant PCS symbols may be present. */
2701 int variant_pcs;
2702
2703 /* The number of bytes in the PLT entry for the TLS descriptor. */
2704 bfd_size_type tlsdesc_plt_entry_size;
2705
2706 /* Used by local STT_GNU_IFUNC symbols. */
2707 htab_t loc_hash_table;
2708 void * loc_hash_memory;
2709 };
2710
2711 /* Create an entry in an AArch64 ELF linker hash table. */
2712
2713 static struct bfd_hash_entry *
2714 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2715 struct bfd_hash_table *table,
2716 const char *string)
2717 {
2718 struct elf_aarch64_link_hash_entry *ret =
2719 (struct elf_aarch64_link_hash_entry *) entry;
2720
2721 /* Allocate the structure if it has not already been allocated by a
2722 subclass. */
2723 if (ret == NULL)
2724 ret = bfd_hash_allocate (table,
2725 sizeof (struct elf_aarch64_link_hash_entry));
2726 if (ret == NULL)
2727 return (struct bfd_hash_entry *) ret;
2728
2729 /* Call the allocation method of the superclass. */
2730 ret = ((struct elf_aarch64_link_hash_entry *)
2731 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2732 table, string));
2733 if (ret != NULL)
2734 {
2735 ret->got_type = GOT_UNKNOWN;
2736 ret->def_protected = 0;
2737 ret->plt_got_offset = (bfd_vma) - 1;
2738 ret->stub_cache = NULL;
2739 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2740 }
2741
2742 return (struct bfd_hash_entry *) ret;
2743 }
2744
2745 /* Initialize an entry in the stub hash table. */
2746
2747 static struct bfd_hash_entry *
2748 stub_hash_newfunc (struct bfd_hash_entry *entry,
2749 struct bfd_hash_table *table, const char *string)
2750 {
2751 /* Allocate the structure if it has not already been allocated by a
2752 subclass. */
2753 if (entry == NULL)
2754 {
2755 entry = bfd_hash_allocate (table,
2756 sizeof (struct
2757 elf_aarch64_stub_hash_entry));
2758 if (entry == NULL)
2759 return entry;
2760 }
2761
2762 /* Call the allocation method of the superclass. */
2763 entry = bfd_hash_newfunc (entry, table, string);
2764 if (entry != NULL)
2765 {
2766 struct elf_aarch64_stub_hash_entry *eh;
2767
2768 /* Initialize the local fields. */
2769 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2770 memset (&eh->stub_sec, 0,
2771 (sizeof (struct elf_aarch64_stub_hash_entry)
2772 - offsetof (struct elf_aarch64_stub_hash_entry, stub_sec)));
2773 }
2774
2775 return entry;
2776 }
2777
2778 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2779 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2780 as global symbols. We reuse indx and dynstr_index for the local symbol
2781 hash since they aren't used by global symbols in this backend. */
2782
2783 static hashval_t
2784 elfNN_aarch64_local_htab_hash (const void *ptr)
2785 {
2786 struct elf_link_hash_entry *h
2787 = (struct elf_link_hash_entry *) ptr;
2788 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2789 }
2790
2791 /* Compare local hash entries. */
2792
2793 static int
2794 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2795 {
2796 struct elf_link_hash_entry *h1
2797 = (struct elf_link_hash_entry *) ptr1;
2798 struct elf_link_hash_entry *h2
2799 = (struct elf_link_hash_entry *) ptr2;
2800
2801 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2802 }
2803
2804 /* Find and/or create a hash entry for a local symbol. */
2805
2806 static struct elf_link_hash_entry *
2807 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2808 bfd *abfd, const Elf_Internal_Rela *rel,
2809 bool create)
2810 {
2811 struct elf_aarch64_link_hash_entry e, *ret;
2812 asection *sec = abfd->sections;
2813 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2814 ELFNN_R_SYM (rel->r_info));
2815 void **slot;
2816
2817 e.root.indx = sec->id;
2818 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2819 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2820 create ? INSERT : NO_INSERT);
2821
2822 if (!slot)
2823 return NULL;
2824
2825 if (*slot)
2826 {
2827 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2828 return &ret->root;
2829 }
2830
2831 ret = (struct elf_aarch64_link_hash_entry *)
2832 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2833 sizeof (struct elf_aarch64_link_hash_entry));
2834 if (ret)
2835 {
2836 memset (ret, 0, sizeof (*ret));
2837 ret->root.indx = sec->id;
2838 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2839 ret->root.dynindx = -1;
2840 *slot = ret;
2841 }
2842 return &ret->root;
2843 }
2844
2845 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2846
2847 static void
2848 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2849 struct elf_link_hash_entry *dir,
2850 struct elf_link_hash_entry *ind)
2851 {
2852 struct elf_aarch64_link_hash_entry *edir, *eind;
2853
2854 edir = (struct elf_aarch64_link_hash_entry *) dir;
2855 eind = (struct elf_aarch64_link_hash_entry *) ind;
2856
2857 if (ind->root.type == bfd_link_hash_indirect)
2858 {
2859 /* Copy over PLT info. */
2860 if (dir->got.refcount <= 0)
2861 {
2862 edir->got_type = eind->got_type;
2863 eind->got_type = GOT_UNKNOWN;
2864 }
2865 }
2866
2867 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2868 }
2869
2870 /* Merge non-visibility st_other attributes. */
2871
2872 static void
2873 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
2874 unsigned int st_other,
2875 bool definition,
2876 bool dynamic ATTRIBUTE_UNUSED)
2877 {
2878 if (definition)
2879 {
2880 struct elf_aarch64_link_hash_entry *eh
2881 = (struct elf_aarch64_link_hash_entry *)h;
2882 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED;
2883 }
2884
2885 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1);
2886 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
2887
2888 if (isym_sto == h_sto)
2889 return;
2890
2891 if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
2892 /* Not fatal, this callback cannot fail. */
2893 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
2894 h->root.root.string, isym_sto);
2895
2896 /* Note: Ideally we would warn about any attribute mismatch, but
2897 this api does not allow that without substantial changes. */
2898 if (isym_sto & STO_AARCH64_VARIANT_PCS)
2899 h->other |= STO_AARCH64_VARIANT_PCS;
2900 }
2901
2902 /* Destroy an AArch64 elf linker hash table. */
2903
2904 static void
2905 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2906 {
2907 struct elf_aarch64_link_hash_table *ret
2908 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2909
2910 if (ret->loc_hash_table)
2911 htab_delete (ret->loc_hash_table);
2912 if (ret->loc_hash_memory)
2913 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2914
2915 bfd_hash_table_free (&ret->stub_hash_table);
2916 _bfd_elf_link_hash_table_free (obfd);
2917 }
2918
2919 /* Create an AArch64 elf linker hash table. */
2920
2921 static struct bfd_link_hash_table *
2922 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2923 {
2924 struct elf_aarch64_link_hash_table *ret;
2925 size_t amt = sizeof (struct elf_aarch64_link_hash_table);
2926
2927 ret = bfd_zmalloc (amt);
2928 if (ret == NULL)
2929 return NULL;
2930
2931 if (!_bfd_elf_link_hash_table_init
2932 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2933 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2934 {
2935 free (ret);
2936 return NULL;
2937 }
2938
2939 ret->plt_header_size = PLT_ENTRY_SIZE;
2940 ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
2941 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2942 ret->plt_entry = elfNN_aarch64_small_plt_entry;
2943 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
2944 ret->obfd = abfd;
2945 ret->root.tlsdesc_got = (bfd_vma) - 1;
2946
2947 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2948 sizeof (struct elf_aarch64_stub_hash_entry)))
2949 {
2950 _bfd_elf_link_hash_table_free (abfd);
2951 return NULL;
2952 }
2953
2954 ret->loc_hash_table = htab_try_create (1024,
2955 elfNN_aarch64_local_htab_hash,
2956 elfNN_aarch64_local_htab_eq,
2957 NULL);
2958 ret->loc_hash_memory = objalloc_create ();
2959 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2960 {
2961 elfNN_aarch64_link_hash_table_free (abfd);
2962 return NULL;
2963 }
2964 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2965
2966 return &ret->root.root;
2967 }
2968
2969 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
2970
2971 static bool
2972 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2973 bfd_vma offset, bfd_vma value)
2974 {
2975 reloc_howto_type *howto;
2976 bfd_vma place;
2977
2978 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
2979 place = (input_section->output_section->vma + input_section->output_offset
2980 + offset);
2981
2982 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
2983 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
2984 value, 0, false);
2985 return _bfd_aarch64_elf_put_addend (input_bfd,
2986 input_section->contents + offset, r_type,
2987 howto, value) == bfd_reloc_ok;
2988 }
2989
2990 /* Determine the type of stub needed, if any, for a call. */
2991
2992 static enum elf_aarch64_stub_type
2993 aarch64_type_of_stub (asection *input_sec,
2994 const Elf_Internal_Rela *rel,
2995 asection *sym_sec,
2996 unsigned char st_type,
2997 bfd_vma destination)
2998 {
2999 bfd_vma location;
3000 bfd_signed_vma branch_offset;
3001 unsigned int r_type;
3002 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
3003
3004 if (st_type != STT_FUNC
3005 && (sym_sec == input_sec))
3006 return stub_type;
3007
3008 /* Determine where the call point is. */
3009 location = (input_sec->output_offset
3010 + input_sec->output_section->vma + rel->r_offset);
3011
3012 branch_offset = (bfd_signed_vma) (destination - location);
3013
3014 r_type = ELFNN_R_TYPE (rel->r_info);
3015
3016 /* We don't want to redirect any old unconditional jump in this way,
3017 only one which is being used for a sibcall, where it is
3018 acceptable for the IP0 and IP1 registers to be clobbered. */
3019 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
3020 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
3021 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
3022 {
3023 stub_type = aarch64_stub_long_branch;
3024 }
3025
3026 return stub_type;
3027 }
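/* For example, a BL at address 0x00400000 targeting 0x10000000 has a branch
   offset of 0x0fc00000 bytes (about 252MiB), which exceeds
   AARCH64_MAX_FWD_BRANCH_OFFSET, so aarch64_type_of_stub asks for an
   aarch64_stub_long_branch veneer; a call within +/-128MiB needs no stub.  */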
3028
3029 /* Build a name for an entry in the stub hash table. */
3030
3031 static char *
3032 elfNN_aarch64_stub_name (const asection *input_section,
3033 const asection *sym_sec,
3034 const struct elf_aarch64_link_hash_entry *hash,
3035 const Elf_Internal_Rela *rel)
3036 {
3037 char *stub_name;
3038 bfd_size_type len;
3039
3040 if (hash)
3041 {
3042 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
3043 stub_name = bfd_malloc (len);
3044 if (stub_name != NULL)
3045 snprintf (stub_name, len, "%08x_%s+%" PRIx64,
3046 (unsigned int) input_section->id,
3047 hash->root.root.root.string,
3048 (uint64_t) rel->r_addend);
3049 }
3050 else
3051 {
3052 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
3053 stub_name = bfd_malloc (len);
3054 if (stub_name != NULL)
3055 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
3056 (unsigned int) input_section->id,
3057 (unsigned int) sym_sec->id,
3058 (unsigned int) ELFNN_R_SYM (rel->r_info),
3059 (uint64_t) rel->r_addend);
3060 }
3061
3062 return stub_name;
3063 }
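/* For example, a call from the section with id 0x2f to the global symbol
   printf with addend 0 is keyed as "0000002f_printf+0", while a call to a
   local symbol is keyed by section and symbol index instead,
   e.g. "0000002f_1a:7+0".  */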
3064
3065 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. Function
3066 symbols that only have executable PLT slots, and whose address the executable
3067 never takes, are not added to the hash table. */
3068
3069 static bool
3070 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
3071 {
3072 if (h->plt.offset != (bfd_vma) -1
3073 && !h->def_regular
3074 && !h->pointer_equality_needed)
3075 return false;
3076
3077 return _bfd_elf_hash_symbol (h);
3078 }
3079
3080
3081 /* Look up an entry in the stub hash. Stub entries are cached because
3082 creating the stub name takes a bit of time. */
3083
3084 static struct elf_aarch64_stub_hash_entry *
3085 elfNN_aarch64_get_stub_entry (const asection *input_section,
3086 const asection *sym_sec,
3087 struct elf_link_hash_entry *hash,
3088 const Elf_Internal_Rela *rel,
3089 struct elf_aarch64_link_hash_table *htab)
3090 {
3091 struct elf_aarch64_stub_hash_entry *stub_entry;
3092 struct elf_aarch64_link_hash_entry *h =
3093 (struct elf_aarch64_link_hash_entry *) hash;
3094 const asection *id_sec;
3095
3096 if ((input_section->flags & SEC_CODE) == 0)
3097 return NULL;
3098
3099 /* If this input section is part of a group of sections sharing one
3100 stub section, then use the id of the first section in the group.
3101 Stub names need to include a section id, as there may well be
3102 more than one stub used to reach say, printf, and we need to
3103 distinguish between them. */
3104 id_sec = htab->stub_group[input_section->id].link_sec;
3105
3106 if (h != NULL && h->stub_cache != NULL
3107 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
3108 {
3109 stub_entry = h->stub_cache;
3110 }
3111 else
3112 {
3113 char *stub_name;
3114
3115 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
3116 if (stub_name == NULL)
3117 return NULL;
3118
3119 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3120 stub_name, false, false);
3121 if (h != NULL)
3122 h->stub_cache = stub_entry;
3123
3124 free (stub_name);
3125 }
3126
3127 return stub_entry;
3128 }
3129
3130
3131 /* Create a stub section. */
3132
3133 static asection *
3134 _bfd_aarch64_create_stub_section (asection *section,
3135 struct elf_aarch64_link_hash_table *htab)
3136 {
3137 size_t namelen;
3138 bfd_size_type len;
3139 char *s_name;
3140
3141 namelen = strlen (section->name);
3142 len = namelen + sizeof (STUB_SUFFIX);
3143 s_name = bfd_alloc (htab->stub_bfd, len);
3144 if (s_name == NULL)
3145 return NULL;
3146
3147 memcpy (s_name, section->name, namelen);
3148 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3149 return (*htab->add_stub_section) (s_name, section);
3150 }
3151
3152
3153 /* Find or create a stub section for a link section.
3154
3155 Find or create the stub section used to collect stubs attached to
3156 the specified link section. */
3157
3158 static asection *
3159 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
3160 struct elf_aarch64_link_hash_table *htab)
3161 {
3162 if (htab->stub_group[link_section->id].stub_sec == NULL)
3163 htab->stub_group[link_section->id].stub_sec
3164 = _bfd_aarch64_create_stub_section (link_section, htab);
3165 return htab->stub_group[link_section->id].stub_sec;
3166 }
3167
3168
3169 /* Find or create a stub section in the stub group for an input
3170 section. */
3171
3172 static asection *
3173 _bfd_aarch64_create_or_find_stub_sec (asection *section,
3174 struct elf_aarch64_link_hash_table *htab)
3175 {
3176 asection *link_sec = htab->stub_group[section->id].link_sec;
3177 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
3178 }
3179
3180
3181 /* Add a new stub entry in the stub group associated with an input
3182 section to the stub hash. Not all fields of the new stub entry are
3183 initialised. */
3184
3185 static struct elf_aarch64_stub_hash_entry *
3186 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
3187 asection *section,
3188 struct elf_aarch64_link_hash_table *htab)
3189 {
3190 asection *link_sec;
3191 asection *stub_sec;
3192 struct elf_aarch64_stub_hash_entry *stub_entry;
3193
3194 link_sec = htab->stub_group[section->id].link_sec;
3195 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
3196
3197 /* Enter this entry into the linker stub hash table. */
3198 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3199 true, false);
3200 if (stub_entry == NULL)
3201 {
3202 /* xgettext:c-format */
3203 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
3204 section->owner, stub_name);
3205 return NULL;
3206 }
3207
3208 stub_entry->stub_sec = stub_sec;
3209 stub_entry->stub_offset = 0;
3210 stub_entry->id_sec = link_sec;
3211
3212 return stub_entry;
3213 }
3214
3215 /* Add a new stub entry in the final stub section to the stub hash.
3216 Not all fields of the new stub entry are initialised. */
3217
3218 static struct elf_aarch64_stub_hash_entry *
3219 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
3220 asection *link_section,
3221 struct elf_aarch64_link_hash_table *htab)
3222 {
3223 asection *stub_sec;
3224 struct elf_aarch64_stub_hash_entry *stub_entry;
3225
3226 stub_sec = NULL;
3227 /* Only create the actual stub if we will end up needing it. */
3228 if (htab->fix_erratum_843419 & ERRAT_ADRP)
3229 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
3230 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3231 true, false);
3232 if (stub_entry == NULL)
3233 {
3234 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
3235 return NULL;
3236 }
3237
3238 stub_entry->stub_sec = stub_sec;
3239 stub_entry->stub_offset = 0;
3240 stub_entry->id_sec = link_section;
3241
3242 return stub_entry;
3243 }
3244
3245
3246 static bool
3247 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
3248 void *in_arg)
3249 {
3250 struct elf_aarch64_stub_hash_entry *stub_entry;
3251 asection *stub_sec;
3252 bfd *stub_bfd;
3253 bfd_byte *loc;
3254 bfd_vma sym_value;
3255 bfd_vma veneered_insn_loc;
3256 bfd_vma veneer_entry_loc;
3257 bfd_signed_vma branch_offset = 0;
3258 unsigned int template_size;
3259 unsigned int pad_size = 0;
3260 const uint32_t *template;
3261 unsigned int i;
3262 struct bfd_link_info *info;
3263 struct elf_aarch64_link_hash_table *htab;
3264
3265 /* Massage our args to the form they really have. */
3266 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3267
3268 info = (struct bfd_link_info *) in_arg;
3269 htab = elf_aarch64_hash_table (info);
3270
3271 /* Fail if the target section could not be assigned to an output
3272 section. The user should fix his linker script. */
3273 if (stub_entry->target_section->output_section == NULL
3274 && info->non_contiguous_regions)
3275 info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
3276 "Retry without "
3277 "--enable-non-contiguous-regions.\n"),
3278 stub_entry->target_section);
3279
3280 stub_sec = stub_entry->stub_sec;
3281
3282 /* The layout must not change when a stub may be the target of another. */
3283 if (htab->has_double_stub)
3284 BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);
3285
3286 /* Make a note of the offset within the stubs for this entry. */
3287 stub_entry->stub_offset = stub_sec->size;
3288 loc = stub_sec->contents + stub_entry->stub_offset;
3289
3290 stub_bfd = stub_sec->owner;
3291
3292 /* This is the address of the stub destination. */
3293 sym_value = (stub_entry->target_value
3294 + stub_entry->target_section->output_offset
3295 + stub_entry->target_section->output_section->vma);
3296
3297 if (stub_entry->stub_type == aarch64_stub_long_branch)
3298 {
3299 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
3300 + stub_sec->output_offset);
3301
3302 /* See if we can relax the stub. */
3303 if (aarch64_valid_for_adrp_p (sym_value, place))
3304 {
3305 stub_entry->stub_type = aarch64_stub_adrp_branch;
3306
3307 /* Avoid the relaxation changing the layout. */
3308 if (htab->has_double_stub)
3309 pad_size = sizeof (aarch64_long_branch_stub)
3310 - sizeof (aarch64_adrp_branch_stub);
3311 }
3312 }
3313
3314 switch (stub_entry->stub_type)
3315 {
3316 case aarch64_stub_adrp_branch:
3317 template = aarch64_adrp_branch_stub;
3318 template_size = sizeof (aarch64_adrp_branch_stub);
3319 break;
3320 case aarch64_stub_long_branch:
3321 template = aarch64_long_branch_stub;
3322 template_size = sizeof (aarch64_long_branch_stub);
3323 break;
3324 case aarch64_stub_bti_direct_branch:
3325 template = aarch64_bti_direct_branch_stub;
3326 template_size = sizeof (aarch64_bti_direct_branch_stub);
3327 break;
3328 case aarch64_stub_erratum_835769_veneer:
3329 template = aarch64_erratum_835769_stub;
3330 template_size = sizeof (aarch64_erratum_835769_stub);
3331 break;
3332 case aarch64_stub_erratum_843419_veneer:
3333 template = aarch64_erratum_843419_stub;
3334 template_size = sizeof (aarch64_erratum_843419_stub);
3335 break;
3336 default:
3337 abort ();
3338 }
3339
3340 for (i = 0; i < (template_size / sizeof template[0]); i++)
3341 {
3342 bfd_putl32 (template[i], loc);
3343 loc += 4;
3344 }
3345
3346 template_size += pad_size;
3347 template_size = (template_size + 7) & ~7;
3348 stub_sec->size += template_size;
3349
3350 switch (stub_entry->stub_type)
3351 {
3352 case aarch64_stub_adrp_branch:
3353 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
3354 stub_entry->stub_offset, sym_value))
3355 /* The stub would not have been relaxed if the offset was out
3356 of range. */
3357 BFD_FAIL ();
3358
3359 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
3360 stub_entry->stub_offset + 4, sym_value))
3361 BFD_FAIL ();
3362 break;
3363
3364 case aarch64_stub_long_branch:
3365 /* We want the value relative to the address 12 bytes back from the
3366 value itself. */
3367 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
3368 stub_entry->stub_offset + 16, sym_value + 12))
3369 BFD_FAIL ();
3370 break;
3371
3372 case aarch64_stub_bti_direct_branch:
3373 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3374 stub_entry->stub_offset + 4, sym_value))
3375 BFD_FAIL ();
3376 break;
3377
3378 case aarch64_stub_erratum_835769_veneer:
3379 veneered_insn_loc = stub_entry->target_section->output_section->vma
3380 + stub_entry->target_section->output_offset
3381 + stub_entry->target_value;
3382 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3383 + stub_entry->stub_sec->output_offset
3384 + stub_entry->stub_offset;
3385 branch_offset = veneered_insn_loc - veneer_entry_loc;
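      /* Convert the byte offset into the 26-bit word offset used by the
	 immediate field of the B instruction in the template.  */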
3386 branch_offset >>= 2;
3387 branch_offset &= 0x3ffffff;
3388 bfd_putl32 (stub_entry->veneered_insn,
3389 stub_sec->contents + stub_entry->stub_offset);
3390 bfd_putl32 (template[1] | branch_offset,
3391 stub_sec->contents + stub_entry->stub_offset + 4);
3392 break;
3393
3394 case aarch64_stub_erratum_843419_veneer:
3395 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3396 stub_entry->stub_offset + 4, sym_value + 4))
3397 BFD_FAIL ();
3398 break;
3399
3400 default:
3401 abort ();
3402 }
3403
3404 return true;
3405 }
3406
3407 /* As above, but don't actually build the stub. Just bump offset so
3408 we know stub section sizes and record the offset for each stub so
3409 a stub can target another stub (needed for BTI direct branch stub). */
3410
3411 static bool
3412 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3413 {
3414 struct elf_aarch64_stub_hash_entry *stub_entry;
3415 struct elf_aarch64_link_hash_table *htab;
3416 int size;
3417
3418 /* Massage our args to the form they really have. */
3419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3420 htab = (struct elf_aarch64_link_hash_table *) in_arg;
3421
3422 switch (stub_entry->stub_type)
3423 {
3424 case aarch64_stub_adrp_branch:
3425 size = sizeof (aarch64_adrp_branch_stub);
3426 break;
3427 case aarch64_stub_long_branch:
3428 size = sizeof (aarch64_long_branch_stub);
3429 break;
3430 case aarch64_stub_bti_direct_branch:
3431 size = sizeof (aarch64_bti_direct_branch_stub);
3432 break;
3433 case aarch64_stub_erratum_835769_veneer:
3434 size = sizeof (aarch64_erratum_835769_stub);
3435 break;
3436 case aarch64_stub_erratum_843419_veneer:
3437 {
3438 if (htab->fix_erratum_843419 == ERRAT_ADR)
3439 return true;
3440 size = sizeof (aarch64_erratum_843419_stub);
3441 }
3442 break;
3443 default:
3444 abort ();
3445 }
3446
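  /* Round each stub up to a multiple of 8 bytes, matching the rounding
     applied when the stubs are actually built.  */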
3447 size = (size + 7) & ~7;
3448 stub_entry->stub_offset = stub_entry->stub_sec->size;
3449 stub_entry->stub_sec->size += size;
3450 return true;
3451 }
3452
3453 /* Output is BTI compatible. */
3454
3455 static bool
3456 elf_aarch64_bti_p (bfd *output_bfd)
3457 {
3458 uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
3459 return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
3460 }
3461
3462 /* External entry points for sizing and building linker stubs. */
3463
3464 /* Set up various things so that we can make a list of input sections
3465 for each output section included in the link. Returns -1 on error,
3466 0 when no stubs will be needed, and 1 on success. */
3467
3468 int
3469 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3470 struct bfd_link_info *info)
3471 {
3472 bfd *input_bfd;
3473 unsigned int bfd_count;
3474 unsigned int top_id, top_index;
3475 asection *section;
3476 asection **input_list, **list;
3477 size_t amt;
3478 struct elf_aarch64_link_hash_table *htab =
3479 elf_aarch64_hash_table (info);
3480
3481 if (!is_elf_hash_table (&htab->root.root))
3482 return 0;
3483
3484 /* Count the number of input BFDs and find the top input section id. */
3485 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3486 input_bfd != NULL; input_bfd = input_bfd->link.next)
3487 {
3488 bfd_count += 1;
3489 for (section = input_bfd->sections;
3490 section != NULL; section = section->next)
3491 {
3492 if (top_id < section->id)
3493 top_id = section->id;
3494 }
3495 }
3496 htab->bfd_count = bfd_count;
3497
3498 amt = sizeof (struct map_stub) * (top_id + 1);
3499 htab->stub_group = bfd_zmalloc (amt);
3500 if (htab->stub_group == NULL)
3501 return -1;
3502
3503 /* We can't use output_bfd->section_count here to find the top output
3504 section index as some sections may have been removed, and
3505 _bfd_strip_section_from_output doesn't renumber the indices. */
3506 for (section = output_bfd->sections, top_index = 0;
3507 section != NULL; section = section->next)
3508 {
3509 if (top_index < section->index)
3510 top_index = section->index;
3511 }
3512
3513 htab->top_index = top_index;
3514 amt = sizeof (asection *) * (top_index + 1);
3515 input_list = bfd_malloc (amt);
3516 htab->input_list = input_list;
3517 if (input_list == NULL)
3518 return -1;
3519
3520 /* For sections we aren't interested in, mark their entries with a
3521 value we can check later. */
3522 list = input_list + top_index;
3523 do
3524 *list = bfd_abs_section_ptr;
3525 while (list-- != input_list);
3526
3527 for (section = output_bfd->sections;
3528 section != NULL; section = section->next)
3529 {
3530 if ((section->flags & SEC_CODE) != 0)
3531 input_list[section->index] = NULL;
3532 }
3533
3534 return 1;
3535 }
3536
3537 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3538 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3539
3540 /* The linker repeatedly calls this function for each input section,
3541 in the order that input sections are linked into output sections.
3542 Build lists of input sections to determine groupings between which
3543 we may insert linker stubs. */
3544
3545 void
3546 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3547 {
3548 struct elf_aarch64_link_hash_table *htab =
3549 elf_aarch64_hash_table (info);
3550
3551 if (isec->output_section->index <= htab->top_index)
3552 {
3553 asection **list = htab->input_list + isec->output_section->index;
3554
3555 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3556 {
3557 /* Steal the link_sec pointer for our list. */
3558 /* This happens to make the list in reverse order,
3559 which is what we want. */
3560 PREV_SEC (isec) = *list;
3561 *list = isec;
3562 }
3563 }
3564 }
3565
3566 /* See whether we can group stub sections together. Grouping stub
3567 sections may result in fewer stubs. More importantly, we need to
3568 put all .init* and .fini* stubs at the beginning of the .init or
3569 .fini output sections respectively, because glibc splits the
3570 _init and _fini functions into multiple parts. Putting a stub in
3571 the middle of a function is not a good idea. */
3572
3573 static void
3574 group_sections (struct elf_aarch64_link_hash_table *htab,
3575 bfd_size_type stub_group_size,
3576 bool stubs_always_after_branch)
3577 {
3578 asection **list = htab->input_list;
3579
3580 do
3581 {
3582 asection *tail = *list;
3583 asection *head;
3584
3585 if (tail == bfd_abs_section_ptr)
3586 continue;
3587
3588 /* Reverse the list: we must avoid placing stubs at the
3589 beginning of the section because the beginning of the text
3590 section may be required for an interrupt vector in bare metal
3591 code. */
3592 #define NEXT_SEC PREV_SEC
3593 head = NULL;
3594 while (tail != NULL)
3595 {
3596 /* Pop from tail. */
3597 asection *item = tail;
3598 tail = PREV_SEC (item);
3599
3600 /* Push on head. */
3601 NEXT_SEC (item) = head;
3602 head = item;
3603 }
3604
3605 while (head != NULL)
3606 {
3607 asection *curr;
3608 asection *next;
3609 bfd_vma stub_group_start = head->output_offset;
3610 bfd_vma end_of_next;
3611
3612 curr = head;
3613 while (NEXT_SEC (curr) != NULL)
3614 {
3615 next = NEXT_SEC (curr);
3616 end_of_next = next->output_offset + next->size;
3617 if (end_of_next - stub_group_start >= stub_group_size)
3618 /* End of NEXT is too far from start, so stop. */
3619 break;
3620 /* Add NEXT to the group. */
3621 curr = next;
3622 }
3623
3624 /* OK, the size from the start to the start of CURR is less
3625 than stub_group_size and thus can be handled by one stub
3626 section. (Or the head section is itself larger than
3627 stub_group_size, in which case we may be toast.)
3628 We should really be keeping track of the total size of
3629 stubs added here, as stubs contribute to the final output
3630 section size. */
3631 do
3632 {
3633 next = NEXT_SEC (head);
3634 /* Set up this stub group. */
3635 htab->stub_group[head->id].link_sec = curr;
3636 }
3637 while (head != curr && (head = next) != NULL);
3638
3639 /* But wait, there's more! Input sections up to stub_group_size
3640 bytes after the stub section can be handled by it too. */
3641 if (!stubs_always_after_branch)
3642 {
3643 stub_group_start = curr->output_offset + curr->size;
3644
3645 while (next != NULL)
3646 {
3647 end_of_next = next->output_offset + next->size;
3648 if (end_of_next - stub_group_start >= stub_group_size)
3649 /* End of NEXT is too far from stubs, so stop. */
3650 break;
3651 /* Add NEXT to the stub group. */
3652 head = next;
3653 next = NEXT_SEC (head);
3654 htab->stub_group[head->id].link_sec = curr;
3655 }
3656 }
3657 head = next;
3658 }
3659 }
3660 while (list++ != htab->input_list + htab->top_index);
3661
3662 free (htab->input_list);
3663 }
3664
3665 #undef PREV_SEC
3666 #undef NEXT_SEC
3667
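/* AArch64 hint-space instruction encodings.  The PACI*SP and BTI forms
   below are the ones that can serve as a landing pad for an indirect
   branch.  */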
3668 #define AARCH64_HINT(insn) (((insn) & 0xfffff01f) == 0xd503201f)
3669 #define AARCH64_PACIASP 0xd503233f
3670 #define AARCH64_PACIBSP 0xd503237f
3671 #define AARCH64_BTI_C 0xd503245f
3672 #define AARCH64_BTI_J 0xd503249f
3673 #define AARCH64_BTI_JC 0xd50324df
3674
3675 /* True if the inserted stub does not break BTI compatibility. */
3676
3677 static bool
3678 aarch64_bti_stub_p (struct bfd_link_info *info,
3679 struct elf_aarch64_stub_hash_entry *stub_entry)
3680 {
3681 /* Stubs without indirect branch are BTI compatible. */
3682 if (stub_entry->stub_type != aarch64_stub_adrp_branch
3683 && stub_entry->stub_type != aarch64_stub_long_branch)
3684 return true;
3685
3686   /* Otherwise, return true only if the target instruction is a valid
         landing pad for BR x16.  */
3687
3688 struct elf_aarch64_link_hash_table *globals = elf_aarch64_hash_table (info);
3689 asection *section = stub_entry->target_section;
3690 bfd_byte loc[4];
3691 file_ptr off = stub_entry->target_value;
3692 bfd_size_type count = sizeof (loc);
3693
3694 /* PLT code is not generated yet, so treat it specially.
3695 Note: Checking elf_aarch64_obj_tdata.plt_type & PLT_BTI is not
3696 enough because it only implies BTI in the PLT0 and tlsdesc PLT
3697 entries. Normal PLT entries don't have BTI in a shared library
3698 (because such PLT is normally not called indirectly and adding
3699 the BTI when a stub targets a PLT would change the PLT layout
3700 and it's too late for that here). */
3701 if (section == globals->root.splt)
3702 memcpy (loc, globals->plt_entry, count);
3703 else if (!bfd_get_section_contents (section->owner, section, loc, off, count))
3704 return false;
3705
3706 uint32_t insn = bfd_getl32 (loc);
3707 if (!AARCH64_HINT (insn))
3708 return false;
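  /* PACIASP and PACIBSP act as implicit BTI C landing pads, so accept them
     alongside the explicit BTI instructions.  */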
3709 return insn == AARCH64_BTI_C
3710 || insn == AARCH64_PACIASP
3711 || insn == AARCH64_BTI_JC
3712 || insn == AARCH64_BTI_J
3713 || insn == AARCH64_PACIBSP;
3714 }
3715
3716 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3717
3718 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3719 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3720 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3721 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3722 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3723 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3724
3725 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3726 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3727 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3728 #define AARCH64_ZR 0x1f
3729
3730 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3731 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
3732
3733 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3734 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3735 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3736 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3737 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3738 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3739 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3740 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3741 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3742 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3743 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3744 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3745 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3746 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3747 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3748 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3749 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3750 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3751
3752 /* Classify INSN if it is indeed a load/store instruction.
3753
3754 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
3755
3756 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3757 is set equal to RT.
3758
3759 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
3760
3761 static bool
3762 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3763 bool *pair, bool *load)
3764 {
3765 uint32_t opcode;
3766 unsigned int r;
3767 uint32_t opc = 0;
3768 uint32_t v = 0;
3769 uint32_t opc_v = 0;
3770
3771 /* Bail out quickly if INSN doesn't fall into the load-store
3772 encoding space. */
3773 if (!AARCH64_LDST (insn))
3774 return false;
3775
3776 *pair = false;
3777 *load = false;
3778 if (AARCH64_LDST_EX (insn))
3779 {
3780 *rt = AARCH64_RT (insn);
3781 *rt2 = *rt;
3782 if (AARCH64_BIT (insn, 21) == 1)
3783 {
3784 *pair = true;
3785 *rt2 = AARCH64_RT2 (insn);
3786 }
3787 *load = AARCH64_LD (insn);
3788 return true;
3789 }
3790 else if (AARCH64_LDST_NAP (insn)
3791 || AARCH64_LDSTP_PI (insn)
3792 || AARCH64_LDSTP_O (insn)
3793 || AARCH64_LDSTP_PRE (insn))
3794 {
3795 *pair = true;
3796 *rt = AARCH64_RT (insn);
3797 *rt2 = AARCH64_RT2 (insn);
3798 *load = AARCH64_LD (insn);
3799 return true;
3800 }
3801 else if (AARCH64_LDST_PCREL (insn)
3802 || AARCH64_LDST_UI (insn)
3803 || AARCH64_LDST_PIIMM (insn)
3804 || AARCH64_LDST_U (insn)
3805 || AARCH64_LDST_PREIMM (insn)
3806 || AARCH64_LDST_RO (insn)
3807 || AARCH64_LDST_UIMM (insn))
3808 {
3809 *rt = AARCH64_RT (insn);
3810 *rt2 = *rt;
3811 if (AARCH64_LDST_PCREL (insn))
3812 *load = true;
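	  /* The opc field (bits 22-23) combined with the V bit (bit 26)
	     distinguishes loads from stores and prefetches in this encoding
	     space; see the load/store encoding tables in the ARM ARM.  */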
3813 opc = AARCH64_BITS (insn, 22, 2);
3814 v = AARCH64_BIT (insn, 26);
3815 opc_v = opc | (v << 2);
3816 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3817 || opc_v == 5 || opc_v == 7);
3818 return true;
3819 }
3820 else if (AARCH64_LDST_SIMD_M (insn)
3821 || AARCH64_LDST_SIMD_M_PI (insn))
3822 {
3823 *rt = AARCH64_RT (insn);
3824 *load = AARCH64_BIT (insn, 22);
3825 opcode = (insn >> 12) & 0xf;
3826 switch (opcode)
3827 {
3828 case 0:
3829 case 2:
3830 *rt2 = *rt + 3;
3831 break;
3832
3833 case 4:
3834 case 6:
3835 *rt2 = *rt + 2;
3836 break;
3837
3838 case 7:
3839 *rt2 = *rt;
3840 break;
3841
3842 case 8:
3843 case 10:
3844 *rt2 = *rt + 1;
3845 break;
3846
3847 default:
3848 return false;
3849 }
3850 return true;
3851 }
3852 else if (AARCH64_LDST_SIMD_S (insn)
3853 || AARCH64_LDST_SIMD_S_PI (insn))
3854 {
3855 *rt = AARCH64_RT (insn);
3856 r = (insn >> 21) & 1;
3857 *load = AARCH64_BIT (insn, 22);
3858 opcode = (insn >> 13) & 0x7;
3859 switch (opcode)
3860 {
3861 case 0:
3862 case 2:
3863 case 4:
3864 *rt2 = *rt + r;
3865 break;
3866
3867 case 1:
3868 case 3:
3869 case 5:
3870 *rt2 = *rt + (r == 0 ? 2 : 3);
3871 break;
3872
3873 case 6:
3874 *rt2 = *rt + r;
3875 break;
3876
3877 case 7:
3878 *rt2 = *rt + (r == 0 ? 2 : 3);
3879 break;
3880
3881 default:
3882 return false;
3883 }
3884 return true;
3885 }
3886
3887 return false;
3888 }
3889
3890 /* Return TRUE if INSN is multiply-accumulate. */
3891
3892 static bool
3893 aarch64_mlxl_p (uint32_t insn)
3894 {
3895 uint32_t op31 = AARCH64_OP31 (insn);
3896
3897 if (AARCH64_MAC (insn)
3898 && (op31 == 0 || op31 == 1 || op31 == 5)
3899 /* Exclude MUL instructions which are encoded as a multiple accumulate
3900 with RA = XZR. */
3901 && AARCH64_RA (insn) != AARCH64_ZR)
3902 return true;
3903
3904 return false;
3905 }
3906
3907 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3908 it is possible for a 64-bit multiply-accumulate instruction to generate an
3909 incorrect result. The details are quite complex and hard to
3910 determine statically, since branches in the code may exist in some
3911 circumstances, but all cases end with a memory (load, store, or
3912 prefetch) instruction followed immediately by the multiply-accumulate
3913 operation. We employ a linker patching technique, by moving the potentially
3914 affected multiply-accumulate instruction into a patch region and replacing
3915 the original instruction with a branch to the patch. This function checks
3916 if INSN_1 is the memory operation followed by a multiply-accumulate
3917 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3918 if INSN_1 and INSN_2 are safe. */
3919
3920 static bool
3921 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3922 {
3923 uint32_t rt;
3924 uint32_t rt2;
3925 uint32_t rn;
3926 uint32_t rm;
3927 uint32_t ra;
3928 bool pair;
3929 bool load;
3930
3931 if (aarch64_mlxl_p (insn_2)
3932 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3933 {
3934 /* Any SIMD memory op is independent of the subsequent MLA
3935 by definition of the erratum. */
3936 if (AARCH64_BIT (insn_1, 26))
3937 return true;
3938
3939 /* If not SIMD, check for integer memory ops and MLA relationship. */
3940 rn = AARCH64_RN (insn_2);
3941 ra = AARCH64_RA (insn_2);
3942 rm = AARCH64_RM (insn_2);
3943
3944 /* If this is a load and there's a true (RAW) dependency, we are safe
3945 and this is not an erratum sequence. */
3946 if (load &&
3947 (rt == rn || rt == rm || rt == ra
3948 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3949 return false;
3950
3951 /* We conservatively put out stubs for all other cases (including
3952 writebacks). */
3953 return true;
3954 }
3955
3956 return false;
3957 }
3958
3959 /* Used to order a list of mapping symbols by address. */
3960
3961 static int
3962 elf_aarch64_compare_mapping (const void *a, const void *b)
3963 {
3964 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3965 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3966
3967 if (amap->vma > bmap->vma)
3968 return 1;
3969 else if (amap->vma < bmap->vma)
3970 return -1;
3971 else if (amap->type > bmap->type)
3972 /* Ensure results do not depend on the host qsort for objects with
3973 multiple mapping symbols at the same address by sorting on type
3974 after vma. */
3975 return 1;
3976 else if (amap->type < bmap->type)
3977 return -1;
3978 else
3979 return 0;
3980 }
3981
3982
3983 static char *
3984 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3985 {
3986 char *stub_name = (char *) bfd_malloc
3987 (strlen ("__erratum_835769_veneer_") + 16);
3988 if (stub_name != NULL)
3989 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3990 return stub_name;
3991 }
3992
3993 /* Scan for Cortex-A53 erratum 835769 sequence.
3994
3995 Return TRUE on a successful scan, FALSE on abnormal termination. */
3996
3997 static bool
3998 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3999 struct bfd_link_info *info,
4000 unsigned int *num_fixes_p)
4001 {
4002 asection *section;
4003 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4004 unsigned int num_fixes = *num_fixes_p;
4005
4006 if (htab == NULL)
4007 return true;
4008
4009 for (section = input_bfd->sections;
4010 section != NULL;
4011 section = section->next)
4012 {
4013 bfd_byte *contents = NULL;
4014 struct _aarch64_elf_section_data *sec_data;
4015 unsigned int span;
4016
4017 if (elf_section_type (section) != SHT_PROGBITS
4018 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4019 || (section->flags & SEC_EXCLUDE) != 0
4020 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4021 || (section->output_section == bfd_abs_section_ptr))
4022 continue;
4023
4024 if (elf_section_data (section)->this_hdr.contents != NULL)
4025 contents = elf_section_data (section)->this_hdr.contents;
4026 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4027 return false;
4028
4029 sec_data = elf_aarch64_section_data (section);
4030
4031 if (sec_data->mapcount)
4032 qsort (sec_data->map, sec_data->mapcount,
4033 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4034
4035 for (span = 0; span < sec_data->mapcount; span++)
4036 {
4037 unsigned int span_start = sec_data->map[span].vma;
4038 unsigned int span_end = ((span == sec_data->mapcount - 1)
4039 ? sec_data->map[0].vma + section->size
4040 : sec_data->map[span + 1].vma);
4041 unsigned int i;
4042 char span_type = sec_data->map[span].type;
4043
4044 if (span_type == 'd')
4045 continue;
4046
4047 for (i = span_start; i + 4 < span_end; i += 4)
4048 {
4049 uint32_t insn_1 = bfd_getl32 (contents + i);
4050 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4051
4052 if (aarch64_erratum_sequence (insn_1, insn_2))
4053 {
4054 struct elf_aarch64_stub_hash_entry *stub_entry;
4055 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
4056 if (! stub_name)
4057 return false;
4058
4059 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
4060 section,
4061 htab);
4062 if (! stub_entry)
4063 return false;
4064
4065 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
4066 stub_entry->target_section = section;
4067 stub_entry->target_value = i + 4;
4068 stub_entry->veneered_insn = insn_2;
4069 stub_entry->output_name = stub_name;
4070 num_fixes++;
4071 }
4072 }
4073 }
4074 if (elf_section_data (section)->this_hdr.contents == NULL)
4075 free (contents);
4076 }
4077
4078 *num_fixes_p = num_fixes;
4079
4080 return true;
4081 }
4082
4083
4084 /* Test if instruction INSN is ADRP. */
4085
4086 static bool
4087 _bfd_aarch64_adrp_p (uint32_t insn)
4088 {
4089 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
4090 }
4091
4092
4093 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
4094
4095 static bool
4096 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
4097 uint32_t insn_3)
4098 {
4099 uint32_t rt;
4100 uint32_t rt2;
4101 bool pair;
4102 bool load;
4103
4104 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
4105 && (!pair
4106 || (pair && !load))
4107 && AARCH64_LDST_UIMM (insn_3)
4108 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
4109 }
4110
4111
4112 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
4113
4114 Return TRUE if section CONTENTS at offset I contains one of the
4115 erratum 843419 sequences, otherwise return FALSE. If a sequence is
4116 seen, set P_VENEER_I to the offset of the final LOAD/STORE
4117 instruction in the sequence.
4118 */
4119
4120 static bool
4121 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
4122 bfd_vma i, bfd_vma span_end,
4123 bfd_vma *p_veneer_i)
4124 {
4125 uint32_t insn_1 = bfd_getl32 (contents + i);
4126
4127 if (!_bfd_aarch64_adrp_p (insn_1))
4128 return false;
4129
4130 if (span_end < i + 12)
4131 return false;
4132
4133 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4134 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
4135
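  /* Erratum 843419 can only trigger when the ADRP sits in one of the last
     two instruction slots of a 4KiB page.  */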
4136 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
4137 return false;
4138
4139 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
4140 {
4141 *p_veneer_i = i + 8;
4142 return true;
4143 }
4144
4145 if (span_end < i + 16)
4146 return false;
4147
4148 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
4149
4150 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
4151 {
4152 *p_veneer_i = i + 12;
4153 return true;
4154 }
4155
4156 return false;
4157 }
4158
4159
4160 /* Resize all stub sections. */
4161
4162 static void
4163 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
4164 {
4165 asection *section;
4166
4167 /* OK, we've added some stubs. Find out the new size of the
4168 stub sections. */
4169 for (section = htab->stub_bfd->sections;
4170 section != NULL; section = section->next)
4171 {
4172 /* Ignore non-stub sections. */
4173 if (!strstr (section->name, STUB_SUFFIX))
4174 continue;
4175
4176 /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
4177 as long branch stubs contain a 64-bit address. */
4178 section->size = 8;
4179 }
4180
4181 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
4182
4183 for (section = htab->stub_bfd->sections;
4184 section != NULL; section = section->next)
4185 {
4186 if (!strstr (section->name, STUB_SUFFIX))
4187 continue;
4188
4189 /* Empty stub section. */
4190 if (section->size == 8)
4191 section->size = 0;
4192
4193 /* Ensure all stub sections have a size which is a multiple of
4194 4096. This is important in order to ensure that the insertion
4195 of stub sections does not in itself move existing code around
4196 in such a way that new errata sequences are created. We only do this
4197 when the ADRP workaround is enabled. If only the ADR workaround is
4198 enabled then the stubs workaround won't ever be used. */
4199 if (htab->fix_erratum_843419 & ERRAT_ADRP)
4200 if (section->size)
4201 section->size = BFD_ALIGN (section->size, 0x1000);
4202 }
4203 }
4204
4205 /* Construct an erratum 843419 workaround stub name. */
4206
4207 static char *
4208 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
4209 bfd_vma offset)
4210 {
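  /* "e843419@" (8) + 4 hex digits + '_' + 8 hex digits + '_' + up to
     16 hex digits + terminating NUL, matching the snprintf format below.  */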
4211 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
4212 char *stub_name = bfd_malloc (len);
4213
4214 if (stub_name != NULL)
4215 snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
4216 input_section->owner->id,
4217 input_section->id,
4218 (uint64_t) offset);
4219 return stub_name;
4220 }
4221
4222 /* Build a stub_entry structure describing an 843419 fixup.
4223
4224 The stub_entry constructed is populated with the bit pattern INSN
4225 of the instruction located at OFFSET within input SECTION.
4226
4227 Returns TRUE on success. */
4228
4229 static bool
4230 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
4231 bfd_vma adrp_offset,
4232 bfd_vma ldst_offset,
4233 asection *section,
4234 struct bfd_link_info *info)
4235 {
4236 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4237 char *stub_name;
4238 struct elf_aarch64_stub_hash_entry *stub_entry;
4239
4240 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
4241 if (stub_name == NULL)
4242 return false;
4243 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4244 false, false);
4245 if (stub_entry)
4246 {
4247 free (stub_name);
4248 return true;
4249 }
4250
4251 /* We always place an 843419 workaround veneer in the stub section
4252 attached to the input section in which an erratum sequence has
4253 been found. This ensures that later in the link process (in
4254 elfNN_aarch64_write_section) when we copy the veneered
4255 instruction from the input section into the stub section the
4256 copied instruction will have had any relocations applied to it.
4257 If we placed workaround veneers in any other stub section then we
4258 could not assume that all relocations have been processed on the
4259 corresponding input section at the point we output the stub
4260 section. */
4261
4262 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
4263 if (stub_entry == NULL)
4264 {
4265 free (stub_name);
4266 return false;
4267 }
4268
4269 stub_entry->adrp_offset = adrp_offset;
4270 stub_entry->target_value = ldst_offset;
4271 stub_entry->target_section = section;
4272 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
4273 stub_entry->veneered_insn = insn;
4274 stub_entry->output_name = stub_name;
4275
4276 return true;
4277 }
4278
4279
4280 /* Scan an input section looking for the signature of erratum 843419.
4281
4282 Scans input SECTION in INPUT_BFD looking for erratum 843419
4283 signatures, for each signature found a stub_entry is created
4284 describing the location of the erratum for subsequent fixup.
4285
4286 Return TRUE on successful scan, FALSE on failure to scan.
4287 */
4288
4289 static bool
4290 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
4291 struct bfd_link_info *info)
4292 {
4293 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4294
4295 if (htab == NULL)
4296 return true;
4297
4298 if (elf_section_type (section) != SHT_PROGBITS
4299 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4300 || (section->flags & SEC_EXCLUDE) != 0
4301 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4302 || (section->output_section == bfd_abs_section_ptr))
4303 return true;
4304
4305 do
4306 {
4307 bfd_byte *contents = NULL;
4308 struct _aarch64_elf_section_data *sec_data;
4309 unsigned int span;
4310
4311 if (elf_section_data (section)->this_hdr.contents != NULL)
4312 contents = elf_section_data (section)->this_hdr.contents;
4313 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4314 return false;
4315
4316 sec_data = elf_aarch64_section_data (section);
4317
4318 if (sec_data->mapcount)
4319 qsort (sec_data->map, sec_data->mapcount,
4320 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4321
4322 for (span = 0; span < sec_data->mapcount; span++)
4323 {
4324 unsigned int span_start = sec_data->map[span].vma;
4325 unsigned int span_end = ((span == sec_data->mapcount - 1)
4326 ? sec_data->map[0].vma + section->size
4327 : sec_data->map[span + 1].vma);
4328 unsigned int i;
4329 char span_type = sec_data->map[span].type;
4330
4331 if (span_type == 'd')
4332 continue;
4333
4334 for (i = span_start; i + 8 < span_end; i += 4)
4335 {
4336 bfd_vma vma = (section->output_section->vma
4337 + section->output_offset
4338 + i);
4339 bfd_vma veneer_i;
4340
4341 if (_bfd_aarch64_erratum_843419_p
4342 (contents, vma, i, span_end, &veneer_i))
4343 {
4344 uint32_t insn = bfd_getl32 (contents + veneer_i);
4345
4346 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
4347 section, info))
4348 return false;
4349 }
4350 }
4351 }
4352
4353 if (elf_section_data (section)->this_hdr.contents == NULL)
4354 free (contents);
4355 }
4356 while (0);
4357
4358 return true;
4359 }
4360
4361
4362 /* Add stub entries for calls.
4363
4364 The basic idea here is to examine all the relocations looking for
4365 PC-relative calls to a target that is unreachable with a "bl"
4366 instruction. */
4367
4368 static bool
4369 _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
4370 struct bfd_link_info *info)
4371 {
4372 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4373 bool need_bti = elf_aarch64_bti_p (output_bfd);
4374 bfd *input_bfd;
4375
4376 for (input_bfd = info->input_bfds; input_bfd != NULL;
4377 input_bfd = input_bfd->link.next)
4378 {
4379 Elf_Internal_Shdr *symtab_hdr;
4380 asection *section;
4381 Elf_Internal_Sym *local_syms = NULL;
4382
4383 if (!is_aarch64_elf (input_bfd)
4384 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4385 continue;
4386
4387 /* We'll need the symbol table in a second. */
4388 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4389 if (symtab_hdr->sh_info == 0)
4390 continue;
4391
4392 /* Walk over each section attached to the input bfd. */
4393 for (section = input_bfd->sections;
4394 section != NULL; section = section->next)
4395 {
4396 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4397
4398 /* If there aren't any relocs, then there's nothing more to do. */
4399 if ((section->flags & SEC_RELOC) == 0
4400 || section->reloc_count == 0
4401 || (section->flags & SEC_CODE) == 0)
4402 continue;
4403
4404 /* If this section is a link-once section that will be
4405 discarded, then don't create any stubs. */
4406 if (section->output_section == NULL
4407 || section->output_section->owner != output_bfd)
4408 continue;
4409
4410 /* Get the relocs. */
4411 internal_relocs
4412 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4413 NULL, info->keep_memory);
4414 if (internal_relocs == NULL)
4415 goto error_ret_free_local;
4416
4417 /* Now examine each relocation. */
4418 irela = internal_relocs;
4419 irelaend = irela + section->reloc_count;
4420 for (; irela < irelaend; irela++)
4421 {
4422 unsigned int r_type, r_indx;
4423 enum elf_aarch64_stub_type stub_type;
4424 struct elf_aarch64_stub_hash_entry *stub_entry;
4425 struct elf_aarch64_stub_hash_entry *stub_entry_bti;
4426 asection *sym_sec;
4427 bfd_vma sym_value;
4428 bfd_vma destination;
4429 struct elf_aarch64_link_hash_entry *hash;
4430 const char *sym_name;
4431 char *stub_name;
4432 char *stub_name_bti;
4433 const asection *id_sec;
4434 const asection *id_sec_bti;
4435 unsigned char st_type;
4436 bfd_size_type len;
4437
4438 r_type = ELFNN_R_TYPE (irela->r_info);
4439 r_indx = ELFNN_R_SYM (irela->r_info);
4440
4441 if (r_type >= (unsigned int) R_AARCH64_end)
4442 {
4443 bfd_set_error (bfd_error_bad_value);
4444 error_ret_free_internal:
4445 if (elf_section_data (section)->relocs == NULL)
4446 free (internal_relocs);
4447 goto error_ret_free_local;
4448 }
4449
4450 /* Only look for stubs on unconditional branch and
4451 branch and link instructions. */
4452 if (r_type != (unsigned int) AARCH64_R (CALL26)
4453 && r_type != (unsigned int) AARCH64_R (JUMP26))
4454 continue;
4455
4456 /* Now determine the call target, its name, value,
4457 section. */
4458 sym_sec = NULL;
4459 sym_value = 0;
4460 destination = 0;
4461 hash = NULL;
4462 sym_name = NULL;
4463 if (r_indx < symtab_hdr->sh_info)
4464 {
4465 /* It's a local symbol. */
4466 Elf_Internal_Sym *sym;
4467 Elf_Internal_Shdr *hdr;
4468
4469 if (local_syms == NULL)
4470 {
4471 local_syms
4472 = (Elf_Internal_Sym *) symtab_hdr->contents;
4473 if (local_syms == NULL)
4474 local_syms
4475 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4476 symtab_hdr->sh_info, 0,
4477 NULL, NULL, NULL);
4478 if (local_syms == NULL)
4479 goto error_ret_free_internal;
4480 }
4481
4482 sym = local_syms + r_indx;
4483 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4484 sym_sec = hdr->bfd_section;
4485 if (!sym_sec)
4486 /* This is an undefined symbol. It can never
4487 be resolved. */
4488 continue;
4489
4490 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4491 sym_value = sym->st_value;
4492 destination = (sym_value + irela->r_addend
4493 + sym_sec->output_offset
4494 + sym_sec->output_section->vma);
4495 st_type = ELF_ST_TYPE (sym->st_info);
4496 sym_name
4497 = bfd_elf_string_from_elf_section (input_bfd,
4498 symtab_hdr->sh_link,
4499 sym->st_name);
4500 }
4501 else
4502 {
4503 int e_indx;
4504
4505 e_indx = r_indx - symtab_hdr->sh_info;
4506 hash = ((struct elf_aarch64_link_hash_entry *)
4507 elf_sym_hashes (input_bfd)[e_indx]);
4508
4509 while (hash->root.root.type == bfd_link_hash_indirect
4510 || hash->root.root.type == bfd_link_hash_warning)
4511 hash = ((struct elf_aarch64_link_hash_entry *)
4512 hash->root.root.u.i.link);
4513
4514 if (hash->root.root.type == bfd_link_hash_defined
4515 || hash->root.root.type == bfd_link_hash_defweak)
4516 {
4517 struct elf_aarch64_link_hash_table *globals =
4518 elf_aarch64_hash_table (info);
4519 sym_sec = hash->root.root.u.def.section;
4520 sym_value = hash->root.root.u.def.value;
4521 /* For a destination in a shared library,
4522 use the PLT stub as target address to
4523 decide whether a branch stub is
4524 needed. */
4525 if (globals->root.splt != NULL && hash != NULL
4526 && hash->root.plt.offset != (bfd_vma) - 1)
4527 {
4528 sym_sec = globals->root.splt;
4529 sym_value = hash->root.plt.offset;
4530 if (sym_sec->output_section != NULL)
4531 destination = (sym_value
4532 + sym_sec->output_offset
4533 + sym_sec->output_section->vma);
4534 }
4535 else if (sym_sec->output_section != NULL)
4536 destination = (sym_value + irela->r_addend
4537 + sym_sec->output_offset
4538 + sym_sec->output_section->vma);
4539 }
4540 else if (hash->root.root.type == bfd_link_hash_undefined
4541 || (hash->root.root.type
4542 == bfd_link_hash_undefweak))
4543 {
4544 /* For a shared library, use the PLT stub as
4545 target address to decide whether a long
4546 branch stub is needed.
4547 For absolute code, they cannot be handled. */
4548 struct elf_aarch64_link_hash_table *globals =
4549 elf_aarch64_hash_table (info);
4550
4551 if (globals->root.splt != NULL && hash != NULL
4552 && hash->root.plt.offset != (bfd_vma) - 1)
4553 {
4554 sym_sec = globals->root.splt;
4555 sym_value = hash->root.plt.offset;
4556 if (sym_sec->output_section != NULL)
4557 destination = (sym_value
4558 + sym_sec->output_offset
4559 + sym_sec->output_section->vma);
4560 }
4561 else
4562 continue;
4563 }
4564 else
4565 {
4566 bfd_set_error (bfd_error_bad_value);
4567 goto error_ret_free_internal;
4568 }
4569 st_type = ELF_ST_TYPE (hash->root.type);
4570 sym_name = hash->root.root.root.string;
4571 }
4572
4573 /* Determine what (if any) linker stub is needed. */
4574 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4575 st_type, destination);
4576 if (stub_type == aarch64_stub_none)
4577 continue;
4578
4579 /* Support for grouping stub sections. */
4580 id_sec = htab->stub_group[section->id].link_sec;
4581
4582 /* Get the name of this stub. */
4583 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4584 irela);
4585 if (!stub_name)
4586 goto error_ret_free_internal;
4587
4588 stub_entry =
4589 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4590 stub_name, false, false);
4591 if (stub_entry != NULL)
4592 {
4593 /* The proper stub has already been created. */
4594 free (stub_name);
4595
4596 /* Always update this stub's target since it may have
4597 changed after layout. */
4598 stub_entry->target_value = sym_value + irela->r_addend;
4599
4600 if (stub_entry->double_stub)
4601 {
4602 /* Update the target of both stubs. */
4603
4604 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4605 stub_name_bti =
4606 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
4607 irela);
4608 if (!stub_name_bti)
4609 goto error_ret_free_internal;
4610 stub_entry_bti =
4611 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4612 stub_name_bti, false, false);
4613 BFD_ASSERT (stub_entry_bti != NULL);
4614 free (stub_name_bti);
4615 stub_entry_bti->target_value = stub_entry->target_value;
4616 stub_entry->target_value = stub_entry_bti->stub_offset;
4617 }
4618 continue;
4619 }
4620
4621 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4622 (stub_name, section, htab);
4623 if (stub_entry == NULL)
4624 {
4625 free (stub_name);
4626 goto error_ret_free_internal;
4627 }
4628
4629 stub_entry->target_value = sym_value + irela->r_addend;
4630 stub_entry->target_section = sym_sec;
4631 stub_entry->stub_type = stub_type;
4632 stub_entry->h = hash;
4633 stub_entry->st_type = st_type;
4634
4635 if (sym_name == NULL)
4636 sym_name = "unnamed";
4637 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4638 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4639 if (stub_entry->output_name == NULL)
4640 {
4641 free (stub_name);
4642 goto error_ret_free_internal;
4643 }
4644
4645 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4646 sym_name);
4647
4648 /* A stub with indirect jump may break BTI compatibility, so
4649 insert another stub with a direct jump near the target. */
4650 if (need_bti && !aarch64_bti_stub_p (info, stub_entry))
4651 {
4652 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4653
4654 /* If the stub with indirect jump and the BTI stub are in
4655 the same stub group: change the indirect jump stub into
4656 a BTI stub since a direct branch can reach the target.
4657 The BTI landing pad is still needed in case another
4658 stub indirectly jumps to it. */
4659 if (id_sec_bti == id_sec)
4660 {
4661 stub_entry->stub_type = aarch64_stub_bti_direct_branch;
4662 goto skip_double_stub;
4663 }
4664
4665 stub_entry->double_stub = true;
4666 htab->has_double_stub = true;
4667
4668 stub_name_bti =
4669 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
4670 if (!stub_name_bti)
4671 {
4672 free (stub_name);
4673 goto error_ret_free_internal;
4674 }
4675
4676 stub_entry_bti =
4677 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4678 stub_name_bti, false, false);
4679 if (stub_entry_bti != NULL)
4680 BFD_ASSERT (stub_entry_bti->stub_type
4681 == aarch64_stub_bti_direct_branch);
4682 else
4683 {
4684 stub_entry_bti =
4685 _bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
4686 sym_sec, htab);
4687 if (stub_entry_bti == NULL)
4688 {
4689 free (stub_name);
4690 free (stub_name_bti);
4691 goto error_ret_free_internal;
4692 }
4693
4694 stub_entry_bti->target_value =
4695 sym_value + irela->r_addend;
4696 stub_entry_bti->target_section = sym_sec;
4697 stub_entry_bti->stub_type =
4698 aarch64_stub_bti_direct_branch;
4699 stub_entry_bti->h = hash;
4700 stub_entry_bti->st_type = st_type;
4701
4702 len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
4703 stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd,
4704 len);
4705 if (stub_entry_bti->output_name == NULL)
4706 {
4707 free (stub_name);
4708 free (stub_name_bti);
4709 goto error_ret_free_internal;
4710 }
4711 snprintf (stub_entry_bti->output_name, len,
4712 BTI_STUB_ENTRY_NAME, sym_name);
4713 }
4714
4715 /* Update the indirect call stub to target the BTI stub. */
4716 stub_entry->target_value = 0;
4717 stub_entry->target_section = stub_entry_bti->stub_sec;
4718 stub_entry->stub_type = stub_type;
4719 stub_entry->h = NULL;
4720 stub_entry->st_type = STT_FUNC;
4721 }
4722 skip_double_stub:
4723 *stub_changed = true;
4724 }
4725
4726 /* We're done with the internal relocs, free them. */
4727 if (elf_section_data (section)->relocs == NULL)
4728 free (internal_relocs);
4729 }
4730 }
4731 return true;
4732 error_ret_free_local:
4733 return false;
4734 }
4735
4736
4737 /* Determine and set the size of the stub section for a final link. */
4738
4739 bool
4740 elfNN_aarch64_size_stubs (bfd *output_bfd,
4741 bfd *stub_bfd,
4742 struct bfd_link_info *info,
4743 bfd_signed_vma group_size,
4744 asection * (*add_stub_section) (const char *,
4745 asection *),
4746 void (*layout_sections_again) (void))
4747 {
4748 bfd_size_type stub_group_size;
4749 bool stubs_always_before_branch;
4750 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4751 unsigned int num_erratum_835769_fixes = 0;
4752
4753 /* Propagate mach to stub bfd, because it may not have been
4754 finalized when we created stub_bfd. */
4755 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4756 bfd_get_mach (output_bfd));
4757
4758 /* Stash our params away. */
4759 htab->stub_bfd = stub_bfd;
4760 htab->add_stub_section = add_stub_section;
4761 htab->layout_sections_again = layout_sections_again;
4762 stubs_always_before_branch = group_size < 0;
4763 if (group_size < 0)
4764 stub_group_size = -group_size;
4765 else
4766 stub_group_size = group_size;
4767
4768 if (stub_group_size == 1)
4769 {
4770 /* Default values. */
4771 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
4772 stub_group_size = 127 * 1024 * 1024;
4773 }
4774
4775 group_sections (htab, stub_group_size, stubs_always_before_branch);
4776
4777 (*htab->layout_sections_again) ();
4778
4779 if (htab->fix_erratum_835769)
4780 {
4781 bfd *input_bfd;
4782
4783 for (input_bfd = info->input_bfds;
4784 input_bfd != NULL; input_bfd = input_bfd->link.next)
4785 {
4786 if (!is_aarch64_elf (input_bfd)
4787 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4788 continue;
4789
4790 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
4791 &num_erratum_835769_fixes))
4792 return false;
4793 }
4794
4795 _bfd_aarch64_resize_stubs (htab);
4796 (*htab->layout_sections_again) ();
4797 }
4798
4799 if (htab->fix_erratum_843419 != ERRAT_NONE)
4800 {
4801 bfd *input_bfd;
4802
4803 for (input_bfd = info->input_bfds;
4804 input_bfd != NULL;
4805 input_bfd = input_bfd->link.next)
4806 {
4807 asection *section;
4808
4809 if (!is_aarch64_elf (input_bfd)
4810 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4811 continue;
4812
4813 for (section = input_bfd->sections;
4814 section != NULL;
4815 section = section->next)
4816 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
4817 return false;
4818 }
4819
4820 _bfd_aarch64_resize_stubs (htab);
4821 (*htab->layout_sections_again) ();
4822 }
4823
4824 for (;;)
4825 {
4826 bool stub_changed = false;
4827
4828 if (!_bfd_aarch64_add_call_stub_entries (&stub_changed, output_bfd, info))
4829 return false;
4830
4831 if (!stub_changed)
4832 return true;
4833
4834 _bfd_aarch64_resize_stubs (htab);
4835 (*htab->layout_sections_again) ();
4836 }
4837 }
4838
4839 /* Build all the stubs associated with the current output file. The
4840 stubs are kept in a hash table attached to the main linker hash
4841 table. We also set up the .plt entries for statically linked PIC
4842 functions here. This function is called via aarch64_elf_finish in the
4843 linker. */
4844
4845 bool
4846 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4847 {
4848 asection *stub_sec;
4849 struct bfd_hash_table *table;
4850 struct elf_aarch64_link_hash_table *htab;
4851
4852 htab = elf_aarch64_hash_table (info);
4853
4854 for (stub_sec = htab->stub_bfd->sections;
4855 stub_sec != NULL; stub_sec = stub_sec->next)
4856 {
4857 bfd_size_type size;
4858
4859 /* Ignore non-stub sections. */
4860 if (!strstr (stub_sec->name, STUB_SUFFIX))
4861 continue;
4862
4863 /* Allocate memory to hold the linker stubs. */
4864 size = stub_sec->size;
4865 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4866 if (stub_sec->contents == NULL && size != 0)
4867 return false;
4868 stub_sec->size = 0;
4869
4870 /* Add a branch around the stub section, and a nop, to keep it 8 byte
4871 aligned, as long branch stubs contain a 64-bit address. */
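      /* 0x14000000 is the encoding of "B <imm26>"; the branch offset is
	 measured in words, hence SIZE >> 2.  */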
4872 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4873 bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
4874 stub_sec->size += 8;
4875 }
4876
4877 /* Build the stubs as directed by the stub hash table. */
4878 table = &htab->stub_hash_table;
4879 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4880
4881 return true;
4882 }
4883
4884
4885 /* Add an entry to the code/data map for section SEC. */
4886
4887 static void
4888 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4889 {
4890 struct _aarch64_elf_section_data *sec_data =
4891 elf_aarch64_section_data (sec);
4892 unsigned int newidx;
4893
4894 if (sec_data->map == NULL)
4895 {
4896 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4897 sec_data->mapcount = 0;
4898 sec_data->mapsize = 1;
4899 }
4900
4901 newidx = sec_data->mapcount++;
4902
4903 if (sec_data->mapcount > sec_data->mapsize)
4904 {
4905 sec_data->mapsize *= 2;
4906 sec_data->map = bfd_realloc_or_free
4907 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4908 }
4909
4910 if (sec_data->map)
4911 {
4912 sec_data->map[newidx].vma = vma;
4913 sec_data->map[newidx].type = type;
4914 }
4915 }
4916
4917
4918 /* Initialise maps of insn/data for input BFDs. */
4919 void
4920 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4921 {
4922 Elf_Internal_Sym *isymbuf;
4923 Elf_Internal_Shdr *hdr;
4924 unsigned int i, localsyms;
4925
4926 /* Make sure that we are dealing with an AArch64 elf binary. */
4927 if (!is_aarch64_elf (abfd))
4928 return;
4929
4930 if ((abfd->flags & DYNAMIC) != 0)
4931 return;
4932
4933 hdr = &elf_symtab_hdr (abfd);
4934 localsyms = hdr->sh_info;
4935
4936 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4937 should contain the number of local symbols, which should come before any
4938 global symbols. Mapping symbols are always local. */
4939 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4940
4941 /* No internal symbols read? Skip this BFD. */
4942 if (isymbuf == NULL)
4943 return;
4944
4945 for (i = 0; i < localsyms; i++)
4946 {
4947 Elf_Internal_Sym *isym = &isymbuf[i];
4948 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4949 const char *name;
4950
4951 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4952 {
4953 name = bfd_elf_string_from_elf_section (abfd,
4954 hdr->sh_link,
4955 isym->st_name);
4956
4957 if (bfd_is_aarch64_special_symbol_name
4958 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4959 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4960 }
4961 }
4962 }
4963
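/* Select the PLT0 entry, PLTn entry template and PLTn entry size that
   match PLT_TYPE (BTI and/or PAC protected).  */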
4964 static void
4965 setup_plt_values (struct bfd_link_info *link_info,
4966 aarch64_plt_type plt_type)
4967 {
4968 struct elf_aarch64_link_hash_table *globals;
4969 globals = elf_aarch64_hash_table (link_info);
4970
4971 if (plt_type == PLT_BTI_PAC)
4972 {
4973 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4974
4975 /* Only in ET_EXEC do we need PLTn with BTI. */
4976 if (bfd_link_pde (link_info))
4977 {
4978 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
4979 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
4980 }
4981 else
4982 {
4983 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4984 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4985 }
4986 }
4987 else if (plt_type == PLT_BTI)
4988 {
4989 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4990
4991 /* Only in ET_EXEC do we need PLTn with BTI. */
4992 if (bfd_link_pde (link_info))
4993 {
4994 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
4995 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
4996 }
4997 }
4998 else if (plt_type == PLT_PAC)
4999 {
5000 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
5001 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
5002 }
5003 }
5004
5005 /* Set option values needed during linking. */
5006 void
5007 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
5008 struct bfd_link_info *link_info,
5009 int no_enum_warn,
5010 int no_wchar_warn, int pic_veneer,
5011 int fix_erratum_835769,
5012 erratum_84319_opts fix_erratum_843419,
5013 int no_apply_dynamic_relocs,
5014 aarch64_bti_pac_info bp_info)
5015 {
5016 struct elf_aarch64_link_hash_table *globals;
5017
5018 globals = elf_aarch64_hash_table (link_info);
5019 globals->pic_veneer = pic_veneer;
5020 globals->fix_erratum_835769 = fix_erratum_835769;
5021 /* If the default options are used, then ERRAT_ADR will be set by
5022 default, which enables the ADRP->ADR workaround for erratum
5023 843419. */
5024 globals->fix_erratum_843419 = fix_erratum_843419;
5025 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
5026
5027 BFD_ASSERT (is_aarch64_elf (output_bfd));
5028 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
5029 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
5030
5031 switch (bp_info.bti_type)
5032 {
5033 case BTI_WARN:
5034 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
5035 elf_aarch64_tdata (output_bfd)->gnu_and_prop
5036 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
5037 break;
5038
5039 default:
5040 break;
5041 }
5042 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
5043 setup_plt_values (link_info, bp_info.plt_type);
5044 }
5045
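/* Return the VMA of the GOT entry for symbol H, initialising its contents
   with VALUE when the entry is resolved at static link time (the low bit
   of the recorded offset notes that this has already been done).  */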
5046 static bfd_vma
5047 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
5048 struct elf_aarch64_link_hash_table
5049 *globals, struct bfd_link_info *info,
5050 bfd_vma value, bfd *output_bfd,
5051 bool *unresolved_reloc_p)
5052 {
5053 bfd_vma off = (bfd_vma) - 1;
5054 asection *basegot = globals->root.sgot;
5055 bool dyn = globals->root.dynamic_sections_created;
5056
5057 if (h != NULL)
5058 {
5059 BFD_ASSERT (basegot != NULL);
5060 off = h->got.offset;
5061 BFD_ASSERT (off != (bfd_vma) - 1);
5062 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
5063 || (bfd_link_pic (info)
5064 && SYMBOL_REFERENCES_LOCAL (info, h))
5065 || (ELF_ST_VISIBILITY (h->other)
5066 && h->root.type == bfd_link_hash_undefweak))
5067 {
5068 /* This is actually a static link, or it is a -Bsymbolic link
5069 and the symbol is defined locally. We must initialize this
5070 entry in the global offset table. Since the offset must
5071 always be a multiple of 8 (4 in the case of ILP32), we use
5072 the least significant bit to record whether we have
5073 initialized it already.
5074 When doing a dynamic link, we create a .rel(a).got relocation
5075 entry to initialize the value. This is done in the
5076 finish_dynamic_symbol routine. */
5077 if ((off & 1) != 0)
5078 off &= ~1;
5079 else
5080 {
5081 bfd_put_NN (output_bfd, value, basegot->contents + off);
5082 h->got.offset |= 1;
5083 }
5084 }
5085 else
5086 *unresolved_reloc_p = false;
5087
5088 off = off + basegot->output_section->vma + basegot->output_offset;
5089 }
5090
5091 return off;
5092 }
5093
5094 /* Change R_TYPE to a more efficient access model where possible,
5095 return the new reloc type. */
5096
5097 static bfd_reloc_code_real_type
5098 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
5099 struct elf_link_hash_entry *h,
5100 struct bfd_link_info *info)
5101 {
5102 bool local_exec = bfd_link_executable (info)
5103 && SYMBOL_REFERENCES_LOCAL (info, h);
5104
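  /* When the symbol resolves locally in an executable, relax to the
     local-exec (TPREL) forms; otherwise relax general-dynamic and TLSDESC
     accesses to the initial-exec (GOTTPREL) forms.  */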
5105 switch (r_type)
5106 {
5107 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5108 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5109 return (local_exec
5110 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5111 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
5112
5113 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5114 return (local_exec
5115 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5116 : r_type);
5117
5118 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5119 return (local_exec
5120 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5121 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5122
5123 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5124 return (local_exec
5125 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5126 : BFD_RELOC_AARCH64_NONE);
5127
5128 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5129 return (local_exec
5130 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5131 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
5132
5133 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5134 return (local_exec
5135 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5136 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
5137
5138 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5139 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5140 return (local_exec
5141 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5142 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
5143
5144 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5145 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
5146
5147 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5148 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
5149
5150 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5151 return r_type;
5152
5153 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5154 return (local_exec
5155 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
5156 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5157
5158 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5159 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5160 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5161 /* Instructions with these relocations will become NOPs. */
5162 return BFD_RELOC_AARCH64_NONE;
5163
5164 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5165 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5166 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5167 return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;
5168
5169 #if ARCH_SIZE == 64
5170 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5171 return local_exec
5172 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5173 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
5174
5175 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5176 return local_exec
5177 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5178 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
5179 #endif
5180
5181 default:
5182 break;
5183 }
5184
5185 return r_type;
5186 }
5187
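/* Classify relocation R_TYPE by the kind of GOT entry it requires
   (GOT_NORMAL, GOT_TLS_GD, GOT_TLSDESC_GD, GOT_TLS_IE or GOT_UNKNOWN).  */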
5188 static unsigned int
5189 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
5190 {
5191 switch (r_type)
5192 {
5193 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5194 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5195 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5196 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5197 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5198 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5199 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5200 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5201 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5202 return GOT_NORMAL;
5203
5204 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5205 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5206 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5207 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5208 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5209 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5210 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5211 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5212 return GOT_TLS_GD;
5213
5214 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5215 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5216 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5217 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5218 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5219 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5220 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
5221 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5222 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5223 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5224 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5225 return GOT_TLSDESC_GD;
5226
5227 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5228 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5229 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5230 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5231 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5232 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5233 return GOT_TLS_IE;
5234
5235 default:
5236 break;
5237 }
5238 return GOT_UNKNOWN;
5239 }
5240
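/* Return TRUE if the TLS access using relocation R_TYPE against the symbol
   described by H / R_SYMNDX in INPUT_BFD may be relaxed.  A GD or TLSDESC
   access is always relaxable to IE when the symbol already has an IE-style
   GOT entry; any other relaxation is only performed when linking an
   executable and the symbol is not an undefined weak.  */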
5241 static bool
5242 aarch64_can_relax_tls (bfd *input_bfd,
5243 struct bfd_link_info *info,
5244 bfd_reloc_code_real_type r_type,
5245 struct elf_link_hash_entry *h,
5246 unsigned long r_symndx)
5247 {
5248 unsigned int symbol_got_type;
5249 unsigned int reloc_got_type;
5250
5251 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
5252 return false;
5253
5254 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
5255 reloc_got_type = aarch64_reloc_got_type (r_type);
5256
5257 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
5258 return true;
5259
5260 if (!bfd_link_executable (info))
5261 return false;
5262
5263 if (h && h->root.type == bfd_link_hash_undefweak)
5264 return false;
5265
5266 return true;
5267 }
5268
5269 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
5270 enumerator. */
5271
5272 static bfd_reloc_code_real_type
5273 aarch64_tls_transition (bfd *input_bfd,
5274 struct bfd_link_info *info,
5275 unsigned int r_type,
5276 struct elf_link_hash_entry *h,
5277 unsigned long r_symndx)
5278 {
5279 bfd_reloc_code_real_type bfd_r_type
5280 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
5281
5282 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
5283 return bfd_r_type;
5284
5285 return aarch64_tls_transition_without_check (bfd_r_type, h, info);
5286 }
5287
5288 /* Return the base VMA address which should be subtracted from real addresses
5289 when resolving R_AARCH64_TLS_DTPREL relocation. */
5290
5291 static bfd_vma
5292 dtpoff_base (struct bfd_link_info *info)
5293 {
5294 /* If tls_sec is NULL, we should have signalled an error already. */
5295 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
5296 return elf_hash_table (info)->tls_sec->vma;
5297 }
5298
5299 /* Return the base VMA address which should be subtracted from real addresses
5300 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
5301
5302 static bfd_vma
5303 tpoff_base (struct bfd_link_info *info)
5304 {
5305 struct elf_link_hash_table *htab = elf_hash_table (info);
5306
5307 /* If tls_sec is NULL, we should have signalled an error already. */
5308 BFD_ASSERT (htab->tls_sec != NULL);
5309
5310 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
5311 htab->tls_sec->alignment_power);
5312 return htab->tls_sec->vma - base;
5313 }
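/* For example, assuming TCB_SIZE is 16 and the TLS segment is aligned to at
   most 16 bytes, tpoff_base () is tls_sec->vma - 16, so a variable placed at
   tls_sec->vma + 8 ends up at thread pointer offset 24.  With a larger
   segment alignment the TCB size is first rounded up to that alignment.  */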
5314
5315 static bfd_vma *
5316 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5317 unsigned long r_symndx)
5318 {
5319 /* Return a pointer to the stored GOT offset for the symbol referred to
5320 by H, or for the local symbol with index R_SYMNDX.  */
5321 if (h != NULL)
5322 return &h->got.offset;
5323 else
5324 {
5325 /* local symbol */
5326 struct elf_aarch64_local_symbol *l;
5327
5328 l = elf_aarch64_locals (input_bfd);
5329 return &l[r_symndx].got_offset;
5330 }
5331 }
5332
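/* GOT entries are at least 4-byte aligned, so the low bit of a recorded GOT
   offset is never significant.  The helpers below borrow that bit as an
   "entry already initialised" flag: *_mark sets it, *_mark_p tests it, and
   symbol_got_offset strips it before the offset is used.  The
   symbol_tlsdesc_got_offset_* helpers further down use the same trick for
   the TLSDESC GOT slot.  */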
5333 static void
5334 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5335 unsigned long r_symndx)
5336 {
5337 bfd_vma *p;
5338 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
5339 *p |= 1;
5340 }
5341
5342 static int
5343 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
5344 unsigned long r_symndx)
5345 {
5346 bfd_vma value;
5347 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5348 return value & 1;
5349 }
5350
5351 static bfd_vma
5352 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5353 unsigned long r_symndx)
5354 {
5355 bfd_vma value;
5356 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5357 value &= ~1;
5358 return value;
5359 }
5360
5361 static bfd_vma *
5362 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5363 unsigned long r_symndx)
5364 {
5365 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
5366 referred to by H, or for the local symbol with index R_SYMNDX.  */
5367 if (h != NULL)
5368 {
5369 struct elf_aarch64_link_hash_entry *eh;
5370 eh = (struct elf_aarch64_link_hash_entry *) h;
5371 return &eh->tlsdesc_got_jump_table_offset;
5372 }
5373 else
5374 {
5375 /* local symbol */
5376 struct elf_aarch64_local_symbol *l;
5377
5378 l = elf_aarch64_locals (input_bfd);
5379 return &l[r_symndx].tlsdesc_got_jump_table_offset;
5380 }
5381 }
5382
5383 static void
5384 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5385 unsigned long r_symndx)
5386 {
5387 bfd_vma *p;
5388 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5389 *p |= 1;
5390 }
5391
5392 static int
5393 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
5394 struct elf_link_hash_entry *h,
5395 unsigned long r_symndx)
5396 {
5397 bfd_vma value;
5398 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5399 return value & 1;
5400 }
5401
5402 static bfd_vma
5403 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5404 unsigned long r_symndx)
5405 {
5406 bfd_vma value;
5407 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5408 value &= ~1;
5409 return value;
5410 }
5411
5412 /* Data for make_branch_to_erratum_835769_stub(). */
5413
5414 struct erratum_835769_branch_to_stub_data
5415 {
5416 struct bfd_link_info *info;
5417 asection *output_section;
5418 bfd_byte *contents;
5419 };
5420
5421 /* Helper to insert branches to erratum 835769 stubs in the right
5422 places for a particular section. */
5423
5424 static bool
5425 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
5426 void *in_arg)
5427 {
5428 struct elf_aarch64_stub_hash_entry *stub_entry;
5429 struct erratum_835769_branch_to_stub_data *data;
5430 bfd_byte *contents;
5431 unsigned long branch_insn = 0;
5432 bfd_vma veneered_insn_loc, veneer_entry_loc;
5433 bfd_signed_vma branch_offset;
5434 unsigned int target;
5435 bfd *abfd;
5436
5437 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5438 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
5439
5440 if (stub_entry->target_section != data->output_section
5441 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
5442 return true;
5443
5444 contents = data->contents;
5445 veneered_insn_loc = stub_entry->target_section->output_section->vma
5446 + stub_entry->target_section->output_offset
5447 + stub_entry->target_value;
5448 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5449 + stub_entry->stub_sec->output_offset
5450 + stub_entry->stub_offset;
5451 branch_offset = veneer_entry_loc - veneered_insn_loc;
5452
5453 abfd = stub_entry->target_section->owner;
5454 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5455 _bfd_error_handler
5456 (_("%pB: error: erratum 835769 stub out "
5457 "of range (input file too large)"), abfd);
5458
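/* Assemble an unconditional B to the veneer: opcode 0x14000000 with the
   branch offset, scaled to 4-byte units and truncated to the 26-bit
   immediate field (the range was checked above).  */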
5459 target = stub_entry->target_value;
5460 branch_insn = 0x14000000;
5461 branch_offset >>= 2;
5462 branch_offset &= 0x3ffffff;
5463 branch_insn |= branch_offset;
5464 bfd_putl32 (branch_insn, &contents[target]);
5465
5466 return true;
5467 }
5468
5469
5470 static bool
5471 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
5472 void *in_arg)
5473 {
5474 struct elf_aarch64_stub_hash_entry *stub_entry
5475 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5476 struct erratum_835769_branch_to_stub_data *data
5477 = (struct erratum_835769_branch_to_stub_data *) in_arg;
5478 struct bfd_link_info *info;
5479 struct elf_aarch64_link_hash_table *htab;
5480 bfd_byte *contents;
5481 asection *section;
5482 bfd *abfd;
5483 bfd_vma place;
5484 uint32_t insn;
5485
5486 info = data->info;
5487 contents = data->contents;
5488 section = data->output_section;
5489
5490 htab = elf_aarch64_hash_table (info);
5491
5492 if (stub_entry->target_section != section
5493 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
5494 return true;
5495
5496 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
5497 || (htab->fix_erratum_843419 & ERRAT_ADR));
5498
5499 /* Only update the stub section if we have one. We should always have one if
5500 we're allowed to use the ADRP erratum workaround, otherwise it is not
5501 required. */
5502 if (stub_entry->stub_sec)
5503 {
5504 insn = bfd_getl32 (contents + stub_entry->target_value);
5505 bfd_putl32 (insn,
5506 stub_entry->stub_sec->contents + stub_entry->stub_offset);
5507 }
5508
5509 place = (section->output_section->vma + section->output_offset
5510 + stub_entry->adrp_offset);
5511 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
5512
5513 if (!_bfd_aarch64_adrp_p (insn))
5514 abort ();
5515
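/* Compute the offset an equivalent ADR placed at PLACE would need:
   sign-extend the ADRP's page-scaled 21-bit immediate and subtract the low
   12 bits of PLACE, since ADRP is relative to the 4K page base while ADR is
   relative to the instruction itself.  */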
5516 bfd_signed_vma imm =
5517 (_bfd_aarch64_sign_extend
5518 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
5519 - (place & 0xfff));
5520
5521 if ((htab->fix_erratum_843419 & ERRAT_ADR)
5522 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
5523 {
5524 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
5525 | AARCH64_RT (insn));
5526 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
5527 /* Stub is not needed, don't map it out. */
5528 stub_entry->stub_type = aarch64_stub_none;
5529 }
5530 else if (htab->fix_erratum_843419 & ERRAT_ADRP)
5531 {
5532 bfd_vma veneered_insn_loc;
5533 bfd_vma veneer_entry_loc;
5534 bfd_signed_vma branch_offset;
5535 uint32_t branch_insn;
5536
5537 veneered_insn_loc = stub_entry->target_section->output_section->vma
5538 + stub_entry->target_section->output_offset
5539 + stub_entry->target_value;
5540 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5541 + stub_entry->stub_sec->output_offset
5542 + stub_entry->stub_offset;
5543 branch_offset = veneer_entry_loc - veneered_insn_loc;
5544
5545 abfd = stub_entry->target_section->owner;
5546 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5547 _bfd_error_handler
5548 (_("%pB: error: erratum 843419 stub out "
5549 "of range (input file too large)"), abfd);
5550
5551 branch_insn = 0x14000000;
5552 branch_offset >>= 2;
5553 branch_offset &= 0x3ffffff;
5554 branch_insn |= branch_offset;
5555 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
5556 }
5557 else
5558 {
5559 abfd = stub_entry->target_section->owner;
5560 _bfd_error_handler
5561 (_("%pB: error: erratum 843419 immediate 0x%" PRIx64
5562 " out of range for ADR (input file too large) and "
5563 "--fix-cortex-a53-843419=adr used. Run the linker with "
5564 "--fix-cortex-a53-843419=full instead"),
5565 abfd, (uint64_t) (bfd_vma) imm);
5566 bfd_set_error (bfd_error_bad_value);
5567 /* This function is called inside a hashtable traversal and the error
5568 handlers called above turn into non-fatal errors, which means that in
5569 this case ld returns an exit code of 0 and also produces a broken object file.
5570 To prevent this, issue a hard abort. */
5571 BFD_FAIL ();
5572 }
5573 return true;
5574 }
5575
5576
5577 static bool
5578 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
5579 struct bfd_link_info *link_info,
5580 asection *sec,
5581 bfd_byte *contents)
5582
5583 {
5584 struct elf_aarch64_link_hash_table *globals =
5585 elf_aarch64_hash_table (link_info);
5586
5587 if (globals == NULL)
5588 return false;
5589
5590 /* Fix code to point to erratum 835769 stubs. */
5591 if (globals->fix_erratum_835769)
5592 {
5593 struct erratum_835769_branch_to_stub_data data;
5594
5595 data.info = link_info;
5596 data.output_section = sec;
5597 data.contents = contents;
5598 bfd_hash_traverse (&globals->stub_hash_table,
5599 make_branch_to_erratum_835769_stub, &data);
5600 }
5601
5602 if (globals->fix_erratum_843419)
5603 {
5604 struct erratum_835769_branch_to_stub_data data;
5605
5606 data.info = link_info;
5607 data.output_section = sec;
5608 data.contents = contents;
5609 bfd_hash_traverse (&globals->stub_hash_table,
5610 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
5611 }
5612
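/* Returning FALSE tells the generic linker code that this section still has
   to be written out in the usual way; the erratum workarounds above only
   patch CONTENTS in place.  */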
5613 return false;
5614 }
5615
5616 /* Return TRUE if RELOC is a relocation against the base of the GOT table.  */
5617
5618 static bool
5619 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
5620 {
5621 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
5622 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5623 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
5624 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
5625 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
5626 }
5627
5628 /* Perform a relocation as part of a final link. The input relocation type
5629 should be TLS relaxed. */
5630
5631 static bfd_reloc_status_type
5632 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
5633 bfd *input_bfd,
5634 bfd *output_bfd,
5635 asection *input_section,
5636 bfd_byte *contents,
5637 Elf_Internal_Rela *rel,
5638 bfd_vma value,
5639 struct bfd_link_info *info,
5640 asection *sym_sec,
5641 struct elf_link_hash_entry *h,
5642 bool *unresolved_reloc_p,
5643 bool save_addend,
5644 bfd_vma *saved_addend,
5645 Elf_Internal_Sym *sym)
5646 {
5647 Elf_Internal_Shdr *symtab_hdr;
5648 unsigned int r_type = howto->type;
5649 bfd_reloc_code_real_type bfd_r_type
5650 = elfNN_aarch64_bfd_reloc_from_howto (howto);
5651 unsigned long r_symndx;
5652 bfd_byte *hit_data = contents + rel->r_offset;
5653 bfd_vma place, off, got_entry_addr = 0;
5654 bfd_signed_vma signed_addend;
5655 struct elf_aarch64_link_hash_table *globals;
5656 bool weak_undef_p;
5657 bool relative_reloc;
5658 asection *base_got;
5659 bfd_vma orig_value = value;
5660 bool resolved_to_zero;
5661 bool abs_symbol_p;
5662
5663 globals = elf_aarch64_hash_table (info);
5664
5665 symtab_hdr = &elf_symtab_hdr (input_bfd);
5666
5667 BFD_ASSERT (is_aarch64_elf (input_bfd));
5668
5669 r_symndx = ELFNN_R_SYM (rel->r_info);
5670
5671 place = input_section->output_section->vma
5672 + input_section->output_offset + rel->r_offset;
5673
5674 /* Get addend, accumulating the addend for consecutive relocs
5675 which refer to the same offset. */
5676 signed_addend = saved_addend ? *saved_addend : 0;
5677 signed_addend += rel->r_addend;
5678
5679 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
5680 : bfd_is_und_section (sym_sec));
5681 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
5682
5683
5684 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
5685 it here if it is defined in a non-shared object. */
5686 if (h != NULL
5687 && h->type == STT_GNU_IFUNC
5688 && h->def_regular)
5689 {
5690 asection *plt;
5691 const char *name;
5692 bfd_vma addend = 0;
5693
5694 if ((input_section->flags & SEC_ALLOC) == 0)
5695 {
5696 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
5697 STT_GNU_IFUNC symbol as STT_FUNC. */
5698 if (elf_section_type (input_section) == SHT_NOTE)
5699 goto skip_ifunc;
5700
5701 /* Dynamic relocs are not propagated for SEC_DEBUGGING
5702 sections because such sections are not SEC_ALLOC and
5703 thus ld.so will not process them. */
5704 if ((input_section->flags & SEC_DEBUGGING) != 0)
5705 return bfd_reloc_ok;
5706
5707 if (h->root.root.string)
5708 name = h->root.root.string;
5709 else
5710 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
5711 _bfd_error_handler
5712 /* xgettext:c-format */
5713 (_("%pB(%pA+%#" PRIx64 "): "
5714 "unresolvable %s relocation against symbol `%s'"),
5715 input_bfd, input_section, (uint64_t) rel->r_offset,
5716 howto->name, name);
5717 bfd_set_error (bfd_error_bad_value);
5718 return bfd_reloc_notsupported;
5719 }
5720 else if (h->plt.offset == (bfd_vma) -1)
5721 goto bad_ifunc_reloc;
5722
5723 /* STT_GNU_IFUNC symbol must go through PLT. */
5724 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
5725 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
5726
5727 switch (bfd_r_type)
5728 {
5729 default:
5730 bad_ifunc_reloc:
5731 if (h->root.root.string)
5732 name = h->root.root.string;
5733 else
5734 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
5735 NULL);
5736 _bfd_error_handler
5737 /* xgettext:c-format */
5738 (_("%pB: relocation %s against STT_GNU_IFUNC "
5739 "symbol `%s' isn't handled by %s"), input_bfd,
5740 howto->name, name, __func__);
5741 bfd_set_error (bfd_error_bad_value);
5742 return bfd_reloc_notsupported;
5743
5744 case BFD_RELOC_AARCH64_NN:
5745 if (rel->r_addend != 0)
5746 {
5747 if (h->root.root.string)
5748 name = h->root.root.string;
5749 else
5750 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5751 sym, NULL);
5752 _bfd_error_handler
5753 /* xgettext:c-format */
5754 (_("%pB: relocation %s against STT_GNU_IFUNC "
5755 "symbol `%s' has non-zero addend: %" PRId64),
5756 input_bfd, howto->name, name, (int64_t) rel->r_addend);
5757 bfd_set_error (bfd_error_bad_value);
5758 return bfd_reloc_notsupported;
5759 }
5760
5761 /* Generate dynamic relocation only when there is a
5762 non-GOT reference in a shared object. */
5763 if (bfd_link_pic (info) && h->non_got_ref)
5764 {
5765 Elf_Internal_Rela outrel;
5766 asection *sreloc;
5767
5768 /* Need a dynamic relocation to get the real function
5769 address. */
5770 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5771 info,
5772 input_section,
5773 rel->r_offset);
5774 if (outrel.r_offset == (bfd_vma) -1
5775 || outrel.r_offset == (bfd_vma) -2)
5776 abort ();
5777
5778 outrel.r_offset += (input_section->output_section->vma
5779 + input_section->output_offset);
5780
5781 if (h->dynindx == -1
5782 || h->forced_local
5783 || bfd_link_executable (info))
5784 {
5785 /* This symbol is resolved locally. */
5786 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5787 outrel.r_addend = (h->root.u.def.value
5788 + h->root.u.def.section->output_section->vma
5789 + h->root.u.def.section->output_offset);
5790 }
5791 else
5792 {
5793 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5794 outrel.r_addend = 0;
5795 }
5796
5797 sreloc = globals->root.irelifunc;
5798 elf_append_rela (output_bfd, sreloc, &outrel);
5799
5800 /* If this reloc is against an external symbol, we
5801 do not want to fiddle with the addend. Otherwise,
5802 we need to include the symbol value so that it
5803 becomes an addend for the dynamic reloc. For an
5804 internal symbol, we have updated addend. */
5805 return bfd_reloc_ok;
5806 }
5807 /* FALLTHROUGH */
5808 case BFD_RELOC_AARCH64_CALL26:
5809 case BFD_RELOC_AARCH64_JUMP26:
5810 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5811 place, value,
5812 signed_addend,
5813 weak_undef_p);
5814 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5815 howto, value);
5816 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5817 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5818 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5819 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5820 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5821 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5822 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5823 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5824 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5825 base_got = globals->root.sgot;
5826 off = h->got.offset;
5827
5828 if (base_got == NULL)
5829 abort ();
5830
5831 if (off == (bfd_vma) -1)
5832 {
5833 bfd_vma plt_index;
5834
5835 /* We can't use h->got.offset here to save state, or
5836 even just remember the offset, as finish_dynamic_symbol
5837 would use that as offset into .got. */
5838
5839 if (globals->root.splt != NULL)
5840 {
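/* The first three .got.plt slots are reserved (the address of the
   dynamic section plus two words filled in by the dynamic linker),
   so GOT entries associated with PLT slots start at index 3.  */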
5841 plt_index = ((h->plt.offset - globals->plt_header_size) /
5842 globals->plt_entry_size);
5843 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5844 base_got = globals->root.sgotplt;
5845 }
5846 else
5847 {
5848 plt_index = h->plt.offset / globals->plt_entry_size;
5849 off = plt_index * GOT_ENTRY_SIZE;
5850 base_got = globals->root.igotplt;
5851 }
5852
5853 if (h->dynindx == -1
5854 || h->forced_local
5855 || info->symbolic)
5856 {
5857 /* This references the local definition. We must
5858 initialize this entry in the global offset table.
5859 Since the offset must always be a multiple of 8,
5860 we use the least significant bit to record
5861 whether we have initialized it already.
5862
5863 When doing a dynamic link, we create a .rela.got
5864 relocation entry to initialize the value. This
5865 is done in the finish_dynamic_symbol routine. */
5866 if ((off & 1) != 0)
5867 off &= ~1;
5868 else
5869 {
5870 bfd_put_NN (output_bfd, value,
5871 base_got->contents + off);
5872 /* Note that this is harmless as -1 | 1 still is -1. */
5873 h->got.offset |= 1;
5874 }
5875 }
5876 value = (base_got->output_section->vma
5877 + base_got->output_offset + off);
5878 }
5879 else
5880 value = aarch64_calculate_got_entry_vma (h, globals, info,
5881 value, output_bfd,
5882 unresolved_reloc_p);
5883
5884 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5885 addend = (globals->root.sgot->output_section->vma
5886 + globals->root.sgot->output_offset);
5887
5888 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5889 place, value,
5890 addend, weak_undef_p);
5891 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5892 case BFD_RELOC_AARCH64_ADD_LO12:
5893 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5894 break;
5895 }
5896 }
5897
5898 skip_ifunc:
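/* Undefined weak symbols that will resolve to zero and must not be given
   dynamic relocations (see UNDEFWEAK_NO_DYNAMIC_RELOC) are noted here so
   that the BFD_RELOC_AARCH64_NN case below can avoid emitting a dynamic
   relocation against them.  */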
5899 resolved_to_zero = (h != NULL
5900 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
5901
5902 switch (bfd_r_type)
5903 {
5904 case BFD_RELOC_AARCH64_NONE:
5905 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5906 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5907 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5908 *unresolved_reloc_p = false;
5909 return bfd_reloc_ok;
5910
5911 case BFD_RELOC_AARCH64_NN:
5912
5913 /* When generating a shared object or relocatable executable, these
5914 relocations are copied into the output file to be resolved at
5915 run time. */
5916 if (((bfd_link_pic (info)
5917 || globals->root.is_relocatable_executable)
5918 && (input_section->flags & SEC_ALLOC)
5919 && (h == NULL
5920 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5921 && !resolved_to_zero)
5922 || h->root.type != bfd_link_hash_undefweak))
5923 /* Or, when we are creating an executable, we may need to keep relocations
5924 for symbols satisfied by a dynamic library if we manage to avoid
5925 copy relocs for the symbol. */
5926 || (ELIMINATE_COPY_RELOCS
5927 && !bfd_link_pic (info)
5928 && h != NULL
5929 && (input_section->flags & SEC_ALLOC)
5930 && h->dynindx != -1
5931 && !h->non_got_ref
5932 && ((h->def_dynamic
5933 && !h->def_regular)
5934 || h->root.type == bfd_link_hash_undefweak
5935 || h->root.type == bfd_link_hash_undefined)))
5936 {
5937 Elf_Internal_Rela outrel;
5938 bfd_byte *loc;
5939 bool skip, relocate;
5940 asection *sreloc;
5941
5942 *unresolved_reloc_p = false;
5943
5944 skip = false;
5945 relocate = false;
5946
5947 outrel.r_addend = signed_addend;
5948 outrel.r_offset =
5949 _bfd_elf_section_offset (output_bfd, info, input_section,
5950 rel->r_offset);
5951 if (outrel.r_offset == (bfd_vma) - 1)
5952 skip = true;
5953 else if (outrel.r_offset == (bfd_vma) - 2)
5954 {
5955 skip = true;
5956 relocate = true;
5957 }
5958 else if (abs_symbol_p)
5959 {
5960 /* Local absolute symbol. */
5961 skip = (h->forced_local || (h->dynindx == -1));
5962 relocate = skip;
5963 }
5964
5965 outrel.r_offset += (input_section->output_section->vma
5966 + input_section->output_offset);
5967
5968 if (skip)
5969 memset (&outrel, 0, sizeof outrel);
5970 else if (h != NULL
5971 && h->dynindx != -1
5972 && (!bfd_link_pic (info)
5973 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
5974 || !h->def_regular))
5975 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5976 else
5977 {
5978 int symbol;
5979
5980 /* On SVR4-ish systems, the dynamic loader cannot
5981 relocate the text and data segments independently,
5982 so the symbol does not matter. */
5983 symbol = 0;
5984 relocate = !globals->no_apply_dynamic_relocs;
5985 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5986 outrel.r_addend += value;
5987 }
5988
5989 sreloc = elf_section_data (input_section)->sreloc;
5990 if (sreloc == NULL || sreloc->contents == NULL)
5991 return bfd_reloc_notsupported;
5992
5993 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5994 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5995
5996 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5997 {
5998 /* Sanity check that we have previously allocated
5999 sufficient space in the relocation section for the
6000 number of relocations we actually want to emit. */
6001 abort ();
6002 }
6003
6004 /* If this reloc is against an external symbol, we do not want to
6005 fiddle with the addend. Otherwise, we need to include the symbol
6006 value so that it becomes an addend for the dynamic reloc. */
6007 if (!relocate)
6008 return bfd_reloc_ok;
6009
6010 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6011 contents, rel->r_offset, value,
6012 signed_addend);
6013 }
6014 else
6015 value += signed_addend;
6016 break;
6017
6018 case BFD_RELOC_AARCH64_CALL26:
6019 case BFD_RELOC_AARCH64_JUMP26:
6020 {
6021 asection *splt = globals->root.splt;
6022 bool via_plt_p =
6023 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
6024
6025 /* A call to an undefined weak symbol is converted to a jump to
6026 the next instruction unless a PLT entry will be created.
6027 The jump to the next instruction is optimized as a NOP.
6028 Do the same for local undefined symbols. */
6029 if (weak_undef_p && ! via_plt_p)
6030 {
6031 bfd_putl32 (INSN_NOP, hit_data);
6032 return bfd_reloc_ok;
6033 }
6034
6035 /* If the call goes through a PLT entry, make sure to
6036 check distance to the right destination address. */
6037 if (via_plt_p)
6038 value = (splt->output_section->vma
6039 + splt->output_offset + h->plt.offset);
6040
6041 /* Check if a stub has to be inserted because the destination
6042 is too far away. */
6043 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
6044
6045 /* If the branch destination is directed to the PLT stub, "value" will be
6046 the final destination; otherwise we should add signed_addend, which may
6047 contain a non-zero value, for example for a call to a local function
6048 symbol that has been turned into "sec_sym + sec_off", with sec_off kept
6049 in signed_addend.  */
6050 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
6051 place))
6052 /* The target is out of reach, so redirect the branch to
6053 the local stub for this function. */
6054 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
6055 rel, globals);
6056 if (stub_entry != NULL)
6057 {
6058 value = (stub_entry->stub_offset
6059 + stub_entry->stub_sec->output_offset
6060 + stub_entry->stub_sec->output_section->vma);
6061
6062 /* We have redirected the destination to stub entry address,
6063 so ignore any addend record in the original rela entry. */
6064 signed_addend = 0;
6065 }
6066 }
6067 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6068 place, value,
6069 signed_addend, weak_undef_p);
6070 *unresolved_reloc_p = false;
6071 break;
6072
6073 case BFD_RELOC_AARCH64_16_PCREL:
6074 case BFD_RELOC_AARCH64_32_PCREL:
6075 case BFD_RELOC_AARCH64_64_PCREL:
6076 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6077 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6078 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6079 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6080 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6081 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6082 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6083 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6084 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6085 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6086 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6087 if (bfd_link_pic (info)
6088 && (input_section->flags & SEC_ALLOC) != 0
6089 && (input_section->flags & SEC_READONLY) != 0
6090 && !_bfd_elf_symbol_refs_local_p (h, info, 1))
6091 {
6092 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6093
6094 _bfd_error_handler
6095 /* xgettext:c-format */
6096 (_("%pB: relocation %s against symbol `%s' which may bind "
6097 "externally can not be used when making a shared object; "
6098 "recompile with -fPIC"),
6099 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6100 h->root.root.string);
6101 bfd_set_error (bfd_error_bad_value);
6102 return bfd_reloc_notsupported;
6103 }
6104 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6105 place, value,
6106 signed_addend,
6107 weak_undef_p);
6108 break;
6109
6110 case BFD_RELOC_AARCH64_BRANCH19:
6111 case BFD_RELOC_AARCH64_TSTBR14:
6112 if (h && h->root.type == bfd_link_hash_undefined)
6113 {
6114 _bfd_error_handler
6115 /* xgettext:c-format */
6116 (_("%pB: conditional branch to undefined symbol `%s' "
6117 "not allowed"), input_bfd, h->root.root.string);
6118 bfd_set_error (bfd_error_bad_value);
6119 return bfd_reloc_notsupported;
6120 }
6121 /* Fall through. */
6122
6123 case BFD_RELOC_AARCH64_16:
6124 #if ARCH_SIZE == 64
6125 case BFD_RELOC_AARCH64_32:
6126 #endif
6127 case BFD_RELOC_AARCH64_ADD_LO12:
6128 case BFD_RELOC_AARCH64_LDST128_LO12:
6129 case BFD_RELOC_AARCH64_LDST16_LO12:
6130 case BFD_RELOC_AARCH64_LDST32_LO12:
6131 case BFD_RELOC_AARCH64_LDST64_LO12:
6132 case BFD_RELOC_AARCH64_LDST8_LO12:
6133 case BFD_RELOC_AARCH64_MOVW_G0:
6134 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6135 case BFD_RELOC_AARCH64_MOVW_G0_S:
6136 case BFD_RELOC_AARCH64_MOVW_G1:
6137 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6138 case BFD_RELOC_AARCH64_MOVW_G1_S:
6139 case BFD_RELOC_AARCH64_MOVW_G2:
6140 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6141 case BFD_RELOC_AARCH64_MOVW_G2_S:
6142 case BFD_RELOC_AARCH64_MOVW_G3:
6143 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6144 place, value,
6145 signed_addend, weak_undef_p);
6146 break;
6147
6148 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6149 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6150 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6151 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6152 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6153 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6154 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6155 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6156 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6157 if (globals->root.sgot == NULL)
6158 BFD_ASSERT (h != NULL);
6159
6160 relative_reloc = false;
6161 if (h != NULL)
6162 {
6163 bfd_vma addend = 0;
6164
6165 /* If a symbol is not dynamic and is not undefined weak, bind it
6166 locally and generate a RELATIVE relocation under PIC mode.
6167
6168 NOTE: one symbol may be referenced by several relocations; we
6169 should only generate one RELATIVE relocation for that symbol.
6170 Therefore, check GOT offset mark first. */
6171 if (h->dynindx == -1
6172 && !h->forced_local
6173 && h->root.type != bfd_link_hash_undefweak
6174 && bfd_link_pic (info)
6175 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6176 relative_reloc = true;
6177
6178 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
6179 output_bfd,
6180 unresolved_reloc_p);
6181 /* Record the GOT entry address which will be used when generating
6182 RELATIVE relocation. */
6183 if (relative_reloc)
6184 got_entry_addr = value;
6185
6186 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6187 addend = (globals->root.sgot->output_section->vma
6188 + globals->root.sgot->output_offset);
6189 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6190 place, value,
6191 addend, weak_undef_p);
6192 }
6193 else
6194 {
6195 bfd_vma addend = 0;
6196 struct elf_aarch64_local_symbol *locals
6197 = elf_aarch64_locals (input_bfd);
6198
6199 if (locals == NULL)
6200 {
6201 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6202 _bfd_error_handler
6203 /* xgettext:c-format */
6204 (_("%pB: local symbol descriptor table be NULL when applying "
6205 "relocation %s against local symbol"),
6206 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
6207 abort ();
6208 }
6209
6210 off = symbol_got_offset (input_bfd, h, r_symndx);
6211 base_got = globals->root.sgot;
6212 got_entry_addr = (base_got->output_section->vma
6213 + base_got->output_offset + off);
6214
6215 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6216 {
6217 bfd_put_64 (output_bfd, value, base_got->contents + off);
6218
6219 /* For a local symbol, we have done the absolute relocation during the
6220 static linking stage, while for a shared library we need to update the
6221 content of the GOT entry according to the shared object's runtime
6222 base address.  So we need to generate an R_AARCH64_RELATIVE reloc
6223 for the dynamic linker.  */
6224 if (bfd_link_pic (info))
6225 relative_reloc = true;
6226
6227 symbol_got_offset_mark (input_bfd, h, r_symndx);
6228 }
6229
6230 /* Update the relocation value to the GOT entry address as we have
6231 transformed the direct data access into an indirect access through the GOT.  */
6232 value = got_entry_addr;
6233
6234 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6235 addend = base_got->output_section->vma + base_got->output_offset;
6236
6237 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6238 place, value,
6239 addend, weak_undef_p);
6240 }
6241
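/* Emit the R_AARCH64_RELATIVE relocation recorded above: at run time the
   dynamic linker adds the load base to the link-time symbol value stored in
   the addend and writes the result into the GOT entry at got_entry_addr.  */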
6242 if (relative_reloc)
6243 {
6244 asection *s;
6245 Elf_Internal_Rela outrel;
6246
6247 s = globals->root.srelgot;
6248 if (s == NULL)
6249 abort ();
6250
6251 outrel.r_offset = got_entry_addr;
6252 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6253 outrel.r_addend = orig_value;
6254 elf_append_rela (output_bfd, s, &outrel);
6255 }
6256 break;
6257
6258 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6259 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6260 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6261 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6262 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6263 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6264 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6265 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6266 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6267 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6268 if (globals->root.sgot == NULL)
6269 return bfd_reloc_notsupported;
6270
6271 value = (symbol_got_offset (input_bfd, h, r_symndx)
6272 + globals->root.sgot->output_section->vma
6273 + globals->root.sgot->output_offset);
6274
6275 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6276 place, value,
6277 0, weak_undef_p);
6278 *unresolved_reloc_p = false;
6279 break;
6280
6281 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6282 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6283 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6284 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6285 if (globals->root.sgot == NULL)
6286 return bfd_reloc_notsupported;
6287
6288 value = symbol_got_offset (input_bfd, h, r_symndx);
6289 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6290 place, value,
6291 0, weak_undef_p);
6292 *unresolved_reloc_p = false;
6293 break;
6294
6295 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6296 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6297 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6298 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6299 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6300 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6301 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6302 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6303 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6304 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6305 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6306 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6307 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6308 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6309 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6310 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6311 {
6312 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6313 {
6314 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6315 _bfd_error_handler
6316 /* xgettext:c-format */
6317 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6318 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6319 h->root.root.string);
6320 bfd_set_error (bfd_error_bad_value);
6321 return bfd_reloc_notsupported;
6322 }
6323
6324 bfd_vma def_value
6325 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
6326 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6327 place, value,
6328 def_value, weak_undef_p);
6329 break;
6330 }
6331
6332 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6333 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6334 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6335 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
6336 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6337 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
6338 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6339 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
6340 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6341 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
6342 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6343 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6344 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6345 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6346 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6347 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6348 {
6349 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6350 {
6351 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6352 _bfd_error_handler
6353 /* xgettext:c-format */
6354 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6355 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6356 h->root.root.string);
6357 bfd_set_error (bfd_error_bad_value);
6358 return bfd_reloc_notsupported;
6359 }
6360
6361 bfd_vma def_value
6362 = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
6363 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6364 place, value,
6365 def_value, weak_undef_p);
6366 *unresolved_reloc_p = false;
6367 break;
6368 }
6369
6370 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6371 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6372 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6373 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6374 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
6375 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6376 if (globals->root.sgot == NULL)
6377 return bfd_reloc_notsupported;
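/* TLSDESC GOT entries live in .got.plt after the reserved and PLT-related
   slots; sgotplt_jump_table_size accounts for those, so the entry address
   is the .got.plt base plus that size plus the per-symbol tlsdesc offset
   recorded earlier.  */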
6378 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6379 + globals->root.sgotplt->output_section->vma
6380 + globals->root.sgotplt->output_offset
6381 + globals->sgotplt_jump_table_size);
6382
6383 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6384 place, value,
6385 0, weak_undef_p);
6386 *unresolved_reloc_p = false;
6387 break;
6388
6389 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6390 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6391 if (globals->root.sgot == NULL)
6392 return bfd_reloc_notsupported;
6393
6394 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6395 + globals->root.sgotplt->output_section->vma
6396 + globals->root.sgotplt->output_offset
6397 + globals->sgotplt_jump_table_size);
6398
6399 value -= (globals->root.sgot->output_section->vma
6400 + globals->root.sgot->output_offset);
6401
6402 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6403 place, value,
6404 0, weak_undef_p);
6405 *unresolved_reloc_p = false;
6406 break;
6407
6408 default:
6409 return bfd_reloc_notsupported;
6410 }
6411
6412 if (saved_addend)
6413 *saved_addend = value;
6414
6415 /* Only apply the final relocation in a sequence. */
6416 if (save_addend)
6417 return bfd_reloc_continue;
6418
6419 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
6420 howto, value);
6421 }
6422
6423 /* LP64 and ILP32 operate on x- and w-registers respectively.
6424 The following definitions take into account the difference between
6425 the corresponding machine codes.  R means x-register if the target
6426 arch is LP64, and w-register if the target is ILP32. */
6427
6428 #if ARCH_SIZE == 64
6429 # define add_R0_R0 (0x91000000)
6430 # define add_R0_R0_R1 (0x8b000020)
6431 # define add_R0_R1 (0x91400020)
6432 # define ldr_R0 (0x58000000)
6433 # define ldr_R0_mask(i) (i & 0xffffffe0)
6434 # define ldr_R0_x0 (0xf9400000)
6435 # define ldr_hw_R0 (0xf2a00000)
6436 # define movk_R0 (0xf2800000)
6437 # define movz_R0 (0xd2a00000)
6438 # define movz_hw_R0 (0xd2c00000)
6439 #else /*ARCH_SIZE == 32 */
6440 # define add_R0_R0 (0x11000000)
6441 # define add_R0_R0_R1 (0x0b000020)
6442 # define add_R0_R1 (0x11400020)
6443 # define ldr_R0 (0x18000000)
6444 # define ldr_R0_mask(i) (i & 0xbfffffe0)
6445 # define ldr_R0_x0 (0xb9400000)
6446 # define ldr_hw_R0 (0x72a00000)
6447 # define movk_R0 (0x72800000)
6448 # define movz_R0 (0x52a00000)
6449 # define movz_hw_R0 (0x52c00000)
6450 #endif
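/* These are base opcodes: register and immediate fields are left zero and
   are OR-ed in by the relaxation code below, e.g. movz_R0 | (insn & 0x1f)
   re-uses the destination register of the instruction being rewritten.  */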
6451
6452 /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub,
6453 it is used to identify the stub information to reset. */
6454
6455 struct erratum_843419_branch_to_stub_clear_data
6456 {
6457 bfd_vma adrp_offset;
6458 asection *output_section;
6459 };
6460
6461 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
6462 section inside IN_ARG match.  The clearing is done by setting the
6463 stub_type to none. */
6464
6465 static bool
6466 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
6467 void *in_arg)
6468 {
6469 struct elf_aarch64_stub_hash_entry *stub_entry
6470 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6471 struct erratum_843419_branch_to_stub_clear_data *data
6472 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
6473
6474 if (stub_entry->target_section != data->output_section
6475 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
6476 || stub_entry->adrp_offset != data->adrp_offset)
6477 return true;
6478
6479 /* Change the stub type instead of removing the entry, removing from the hash
6480 table would be slower and we have already reserved the memory for the entry
6481 so there wouldn't be much gain. Changing the stub also keeps around a
6482 record of what was there before. */
6483 stub_entry->stub_type = aarch64_stub_none;
6484
6485 /* We're done and there could have been only one matching stub at that
6486 particular offset, so abort further traversal. */
6487 return false;
6488 }
6489
6490 /* TLS Relaxations may relax an adrp sequence that matches the erratum 843419
6491 sequence. In this case the erratum no longer applies and we need to remove
6492 the entry from the pending stub generation. This clears matching adrp insn
6493 at ADRP_OFFSET in INPUT_SECTION in the stub table defined in GLOBALS. */
6494
6495 static void
6496 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
6497 bfd_vma adrp_offset, asection *input_section)
6498 {
6499 if (globals->fix_erratum_843419 & ERRAT_ADRP)
6500 {
6501 struct erratum_843419_branch_to_stub_clear_data data;
6502 data.adrp_offset = adrp_offset;
6503 data.output_section = input_section;
6504
6505 bfd_hash_traverse (&globals->stub_hash_table,
6506 _bfd_aarch64_erratum_843419_clear_stub, &data);
6507 }
6508 }
6509
6510 /* Handle TLS relaxations. Relaxing is possible for symbols that use
6511 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
6512 link.
6513
6514 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
6515 is to then call final_link_relocate. Return other values in the
6516 case of error. */
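/* Note on return values: bfd_reloc_continue means the instruction has been
   rewritten but still needs its immediate resolved by
   elfNN_aarch64_final_link_relocate under the relaxed relocation type chosen
   by aarch64_tls_transition; bfd_reloc_ok means the instruction is already
   complete (for instance it became a NOP or an mrs).  */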
6517
6518 static bfd_reloc_status_type
6519 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
6520 bfd *input_bfd, asection *input_section,
6521 bfd_byte *contents, Elf_Internal_Rela *rel,
6522 struct elf_link_hash_entry *h,
6523 struct bfd_link_info *info)
6524 {
6525 bool local_exec = bfd_link_executable (info)
6526 && SYMBOL_REFERENCES_LOCAL (info, h);
6527 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
6528 unsigned long insn;
6529
6530 BFD_ASSERT (globals && input_bfd && contents && rel);
6531
6532 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6533 {
6534 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6535 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6536 if (local_exec)
6537 {
6538 /* GD->LE relaxation:
6539 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
6540 or
6541 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
6542
6543 Where R is x for LP64, and w for ILP32. */
6544 bfd_putl32 (movz_R0, contents + rel->r_offset);
6545 /* We have relaxed the adrp into a mov, we may have to clear any
6546 pending erratum fixes. */
6547 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6548 return bfd_reloc_continue;
6549 }
6550 else
6551 {
6552 /* GD->IE relaxation:
6553 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
6554 or
6555 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
6556 */
6557 return bfd_reloc_continue;
6558 }
6559
6560 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6561 BFD_ASSERT (0);
6562 break;
6563
6564 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6565 if (local_exec)
6566 {
6567 /* Tiny TLSDESC->LE relaxation:
6568 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
6569 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
6570 .tlsdesccall var
6571 blr x1 => nop
6572
6573 Where R is x for LP64, and w for ILP32. */
6574 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6575 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6576
6577 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6578 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6579 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6580
6581 bfd_putl32 (movz_R0, contents + rel->r_offset);
6582 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
6583 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6584 return bfd_reloc_continue;
6585 }
6586 else
6587 {
6588 /* Tiny TLSDESC->IE relaxation:
6589 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
6590 adr x0, :tlsdesc:var => nop
6591 .tlsdesccall var
6592 blr x1 => nop
6593 */
6594 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6595 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6596
6597 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6598 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6599
6600 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6601 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6602 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6603 return bfd_reloc_continue;
6604 }
6605
6606 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6607 if (local_exec)
6608 {
6609 /* Tiny GD->LE relaxation:
6610 adr x0, :tlsgd:var => mrs x1, tpidr_el0
6611 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
6612 nop => add R0, R0, #:tprel_lo12_nc:x
6613
6614 Where R is x for LP64, and w for ILP32.  */
6615
6616 /* First kill the tls_get_addr reloc on the bl instruction. */
6617 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6618
6619 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
6620 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
6621 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
6622
6623 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6624 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
6625 rel[1].r_offset = rel->r_offset + 8;
6626
6627 /* Move the current relocation to the second instruction in
6628 the sequence. */
6629 rel->r_offset += 4;
6630 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6631 AARCH64_R (TLSLE_ADD_TPREL_HI12));
6632 return bfd_reloc_continue;
6633 }
6634 else
6635 {
6636 /* Tiny GD->IE relaxation:
6637 adr x0, :tlsgd:var => ldr R0, :gottprel:var
6638 bl __tls_get_addr => mrs x1, tpidr_el0
6639 nop => add R0, R0, R1
6640
6641 Where R is x for LP64, and w for ILP32.  */
6642
6643 /* First kill the tls_get_addr reloc on the bl instruction. */
6644 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6645 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6646
6647 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6648 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6649 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6650 return bfd_reloc_continue;
6651 }
6652
6653 #if ARCH_SIZE == 64
6654 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6655 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
6656 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
6657 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
6658
6659 if (local_exec)
6660 {
6661 /* Large GD->LE relaxation:
6662 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
6663 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
6664 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
6665 bl __tls_get_addr => mrs x1, tpidr_el0
6666 nop => add x0, x0, x1
6667 */
6668 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6669 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6670 rel[2].r_offset = rel->r_offset + 8;
6671
6672 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
6673 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
6674 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
6675 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6676 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6677 }
6678 else
6679 {
6680 /* Large GD->IE relaxation:
6681 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
6682 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
6683 add x0, gp, x0 => ldr x0, [gp, x0]
6684 bl __tls_get_addr => mrs x1, tpidr_el0
6685 nop => add x0, x0, x1
6686 */
6687 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6688 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
6689 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
6690 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6691 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6692 }
6693 return bfd_reloc_continue;
6694
6695 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6696 return bfd_reloc_continue;
6697 #endif
6698
6699 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6700 return bfd_reloc_continue;
6701
6702 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6703 if (local_exec)
6704 {
6705 /* GD->LE relaxation:
6706 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
6707
6708 Where R is x for lp64 mode, and w for ILP32 mode. */
6709 bfd_putl32 (movk_R0, contents + rel->r_offset);
6710 return bfd_reloc_continue;
6711 }
6712 else
6713 {
6714 /* GD->IE relaxation:
6715 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
6716
6717 Where R is x for lp64 mode, and w for ILP32 mode. */
6718 insn = bfd_getl32 (contents + rel->r_offset);
6719 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6720 return bfd_reloc_continue;
6721 }
6722
6723 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6724 if (local_exec)
6725 {
6726 /* GD->LE relaxation
6727 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
6728 bl __tls_get_addr => mrs x1, tpidr_el0
6729 nop => add R0, R1, R0
6730
6731 Where R is x for lp64 mode, and w for ILP32 mode. */
6732
6733 /* First kill the tls_get_addr reloc on the bl instruction. */
6734 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6735 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6736
6737 bfd_putl32 (movk_R0, contents + rel->r_offset);
6738 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6739 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6740 return bfd_reloc_continue;
6741 }
6742 else
6743 {
6744 /* GD->IE relaxation
6745 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
6746 BL __tls_get_addr => mrs x1, tpidr_el0
6747 R_AARCH64_CALL26
6748 NOP => add R0, R1, R0
6749
6750 Where R is x for lp64 mode, and w for ilp32 mode. */
6751
6752 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6753
6754 /* Remove the relocation on the BL instruction. */
6755 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6756
6757 /* We choose to fix up the BL and NOP instructions using the
6758 offset from the second relocation to allow flexibility in
6759 scheduling instructions between the ADD and BL. */
6760 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
6761 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
6762 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
6763 return bfd_reloc_continue;
6764 }
6765
6766 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6767 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6768 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6769 /* GD->IE/LE relaxation:
6770 add x0, x0, #:tlsdesc_lo12:var => nop
6771 blr xd => nop
6772 */
6773 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
6774 return bfd_reloc_ok;
6775
6776 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6777 if (local_exec)
6778 {
6779 /* GD->LE relaxation:
6780 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
6781
6782 Where R is x for lp64 mode, and w for ILP32 mode. */
6783 bfd_putl32 (movk_R0, contents + rel->r_offset);
6784 return bfd_reloc_continue;
6785 }
6786 else
6787 {
6788 /* GD->IE relaxation:
6789 ldr xd, [gp, xn] => ldr R0, [gp, xn]
6790
6791 Where R is x for lp64 mode, and w for ILP32 mode. */
6792 insn = bfd_getl32 (contents + rel->r_offset);
6793 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6794 return bfd_reloc_ok;
6795 }
6796
6797 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6798 /* GD->LE relaxation:
6799 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
6800 GD->IE relaxation:
6801 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
6802
6803 Where R is x for lp64 mode, and w for ILP32 mode. */
6804 if (local_exec)
6805 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
6806 return bfd_reloc_continue;
6807
6808 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6809 if (local_exec)
6810 {
6811 /* GD->LE relaxation:
6812 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
6813
6814 Where R is x for lp64 mode, and w for ILP32 mode. */
6815 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
6816 return bfd_reloc_continue;
6817 }
6818 else
6819 {
6820 /* GD->IE relaxation:
6821 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
6822
6823 Where R is x for lp64 mode, and w for ILP32 mode. */
6824 insn = bfd_getl32 (contents + rel->r_offset);
6825 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6826 return bfd_reloc_continue;
6827 }
6828
6829 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6830 /* IE->LE relaxation:
6831 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
6832
6833 Where R is x for lp64 mode, and w for ILP32 mode. */
6834 if (local_exec)
6835 {
6836 insn = bfd_getl32 (contents + rel->r_offset);
6837 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6838 /* We have relaxed the adrp into a mov, we may have to clear any
6839 pending erratum fixes. */
6840 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6841 }
6842 return bfd_reloc_continue;
6843
6844 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6845 /* IE->LE relaxation:
6846 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
6847
6848 Where R is x for lp64 mode, and w for ILP32 mode. */
6849 if (local_exec)
6850 {
6851 insn = bfd_getl32 (contents + rel->r_offset);
6852 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
6853 }
6854 return bfd_reloc_continue;
6855
6856 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6857 /* LD->LE relaxation (tiny):
6858 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6859 bl __tls_get_addr => add R0, R0, TCB_SIZE
6860
6861 Where R is x for lp64 mode, and w for ilp32 mode. */
6862 if (local_exec)
6863 {
6864 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6865 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6866 /* No CALL26 relocation is needed for the call to tls_get_addr.  */
6867 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6868 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6869 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6870 contents + rel->r_offset + 4);
6871 return bfd_reloc_ok;
6872 }
6873 return bfd_reloc_continue;
6874
6875 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6876 /* LD->LE relaxation (small):
6877 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6878 */
6879 if (local_exec)
6880 {
6881 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6882 return bfd_reloc_ok;
6883 }
6884 return bfd_reloc_continue;
6885
6886 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6887 /* LD->LE relaxation (small):
6888 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6889 bl __tls_get_addr => nop
6890
6891 Where R is x for lp64 mode, and w for ILP32 mode. */
6892 if (local_exec)
6893 {
6894 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6895 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6896 /* No need for a CALL26 relocation for tls_get_addr. */
6897 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6898 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6899 contents + rel->r_offset + 0);
6900 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6901 return bfd_reloc_ok;
6902 }
6903 return bfd_reloc_continue;
6904
6905 default:
6906 return bfd_reloc_continue;
6907 }
6908
6909 return bfd_reloc_ok;
6910 }
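
/* Editor's illustration (not part of BFD): several relaxation cases above
   rewrite an instruction word in place while preserving its destination
   register, e.g. "movz_R0 | (insn & 0x1f)" or "movk_R0 | (insn & 0x1f)".
   A minimal sketch of that masking step, assuming only that AArch64 encodes
   Rd in bits [4:0]; the helper name is invented.  */

static unsigned int ATTRIBUTE_UNUSED
example_retarget_insn (unsigned int old_insn, unsigned int new_template)
{
  /* Keep Rd (bits [4:0]) from the original encoding; take the opcode and
     immediate fields from the replacement template.  */
  return (new_template & ~0x1fu) | (old_insn & 0x1fu);
}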
6911
6912 /* Relocate an AArch64 ELF section. */
6913
6914 static int
6915 elfNN_aarch64_relocate_section (bfd *output_bfd,
6916 struct bfd_link_info *info,
6917 bfd *input_bfd,
6918 asection *input_section,
6919 bfd_byte *contents,
6920 Elf_Internal_Rela *relocs,
6921 Elf_Internal_Sym *local_syms,
6922 asection **local_sections)
6923 {
6924 Elf_Internal_Shdr *symtab_hdr;
6925 struct elf_link_hash_entry **sym_hashes;
6926 Elf_Internal_Rela *rel;
6927 Elf_Internal_Rela *relend;
6928 const char *name;
6929 struct elf_aarch64_link_hash_table *globals;
6930 bool save_addend = false;
6931 bfd_vma addend = 0;
6932
6933 globals = elf_aarch64_hash_table (info);
6934
6935 symtab_hdr = &elf_symtab_hdr (input_bfd);
6936 sym_hashes = elf_sym_hashes (input_bfd);
6937
6938 rel = relocs;
6939 relend = relocs + input_section->reloc_count;
6940 for (; rel < relend; rel++)
6941 {
6942 unsigned int r_type;
6943 bfd_reloc_code_real_type bfd_r_type;
6944 bfd_reloc_code_real_type relaxed_bfd_r_type;
6945 reloc_howto_type *howto;
6946 unsigned long r_symndx;
6947 Elf_Internal_Sym *sym;
6948 asection *sec;
6949 struct elf_link_hash_entry *h;
6950 bfd_vma relocation;
6951 bfd_reloc_status_type r;
6952 arelent bfd_reloc;
6953 char sym_type;
6954 bool unresolved_reloc = false;
6955 char *error_message = NULL;
6956
6957 r_symndx = ELFNN_R_SYM (rel->r_info);
6958 r_type = ELFNN_R_TYPE (rel->r_info);
6959
6960 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
6961 howto = bfd_reloc.howto;
6962
6963 if (howto == NULL)
6964 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
6965
6966 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6967
6968 h = NULL;
6969 sym = NULL;
6970 sec = NULL;
6971
6972 if (r_symndx < symtab_hdr->sh_info)
6973 {
6974 sym = local_syms + r_symndx;
6975 sym_type = ELFNN_ST_TYPE (sym->st_info);
6976 sec = local_sections[r_symndx];
6977
6978 /* An object file might have a reference to a local
6979 undefined symbol. This is a daft object file, but we
6980 should at least do something about it. NONE and NULL
6981 relocations do not use the symbol and are explicitly
6982 allowed to use an undefined one, so allow those.
6983 Likewise for relocations against STN_UNDEF. */
6984 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6985 && r_symndx != STN_UNDEF
6986 && bfd_is_und_section (sec)
6987 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6988 (*info->callbacks->undefined_symbol)
6989 (info, bfd_elf_string_from_elf_section
6990 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6991 input_bfd, input_section, rel->r_offset, true);
6992
6993 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6994
6995 /* Relocate against local STT_GNU_IFUNC symbol. */
6996 if (!bfd_link_relocatable (info)
6997 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6998 {
6999 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
7000 rel, false);
7001 if (h == NULL)
7002 abort ();
7003
7004 /* Set STT_GNU_IFUNC symbol value. */
7005 h->root.u.def.value = sym->st_value;
7006 h->root.u.def.section = sec;
7007 }
7008 }
7009 else
7010 {
7011 bool warned, ignored;
7012
7013 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
7014 r_symndx, symtab_hdr, sym_hashes,
7015 h, sec, relocation,
7016 unresolved_reloc, warned, ignored);
7017
7018 sym_type = h->type;
7019 }
7020
7021 if (sec != NULL && discarded_section (sec))
7022 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
7023 rel, 1, relend, howto, 0, contents);
7024
7025 if (bfd_link_relocatable (info))
7026 continue;
7027
7028 if (h != NULL)
7029 name = h->root.root.string;
7030 else
7031 {
7032 name = (bfd_elf_string_from_elf_section
7033 (input_bfd, symtab_hdr->sh_link, sym->st_name));
7034 if (name == NULL || *name == '\0')
7035 name = bfd_section_name (sec);
7036 }
7037
7038 if (r_symndx != 0
7039 && r_type != R_AARCH64_NONE
7040 && r_type != R_AARCH64_NULL
7041 && (h == NULL
7042 || h->root.type == bfd_link_hash_defined
7043 || h->root.type == bfd_link_hash_defweak)
7044 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
7045 {
7046 _bfd_error_handler
7047 ((sym_type == STT_TLS
7048 /* xgettext:c-format */
7049 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
7050 /* xgettext:c-format */
7051 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
7052 input_bfd,
7053 input_section, (uint64_t) rel->r_offset, howto->name, name);
7054 }
7055
7056 /* We relax only if we can see that there can be a valid transition
7057 from one reloc type to another.
7058 We call elfNN_aarch64_final_link_relocate unless we're completely
7059 done, i.e., the relaxation produced the final output we want. */
7060
7061 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
7062 h, r_symndx);
7063 if (relaxed_bfd_r_type != bfd_r_type)
7064 {
7065 bfd_r_type = relaxed_bfd_r_type;
7066 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
7067 BFD_ASSERT (howto != NULL);
7068 r_type = howto->type;
7069 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
7070 contents, rel, h, info);
7071 unresolved_reloc = 0;
7072 }
7073 else
7074 r = bfd_reloc_continue;
7075
7076 /* There may be multiple consecutive relocations for the
7077 same offset. In that case we are supposed to treat the
7078 output of each relocation as the addend for the next. */
7079 if (rel + 1 < relend
7080 && rel->r_offset == rel[1].r_offset
7081 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
7082 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
7083 save_addend = true;
7084 else
7085 save_addend = false;
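
      /* Editor's note: a hypothetical worked example of the rule above.  If
	 relocations R1 and R2 both have r_offset 0x10, R1 is applied first
	 and its result V1 = S1 + A1 is not written back on its own; it is
	 carried in ADDEND (save_addend) and becomes the addend of R2, so the
	 value finally stored is V2 = S2 + V1.  R1, R2, S1, S2 and the offset
	 are invented purely for illustration.  */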
7086
7087 if (r == bfd_reloc_continue)
7088 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
7089 input_section, contents, rel,
7090 relocation, info, sec,
7091 h, &unresolved_reloc,
7092 save_addend, &addend, sym);
7093
7094 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
7095 {
7096 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7097 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7098 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7099 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7100 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7101 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7102 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7103 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7104 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7105 {
7106 bool need_relocs = false;
7107 bfd_byte *loc;
7108 int indx;
7109 bfd_vma off;
7110
7111 off = symbol_got_offset (input_bfd, h, r_symndx);
7112 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7113
7114 need_relocs =
7115 (!bfd_link_executable (info) || indx != 0) &&
7116 (h == NULL
7117 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7118 || h->root.type != bfd_link_hash_undefweak);
7119
7120 BFD_ASSERT (globals->root.srelgot != NULL);
7121
7122 if (need_relocs)
7123 {
7124 Elf_Internal_Rela rela;
7125 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
7126 rela.r_addend = 0;
7127 rela.r_offset = globals->root.sgot->output_section->vma +
7128 globals->root.sgot->output_offset + off;
7129
7130
7131 loc = globals->root.srelgot->contents;
7132 loc += globals->root.srelgot->reloc_count++
7133 * RELOC_SIZE (htab);
7134 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7135
7136 bfd_reloc_code_real_type real_type =
7137 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7138
7139 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
7140 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
7141 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
7142 {
7143 /* For local dynamic, don't generate DTPREL in any case.
7144 Initialize the DTPREL slot to zero, so we get the module
7145 base address when invoking the runtime TLS resolver. */
7146 bfd_put_NN (output_bfd, 0,
7147 globals->root.sgot->contents + off
7148 + GOT_ENTRY_SIZE);
7149 }
7150 else if (indx == 0)
7151 {
7152 bfd_put_NN (output_bfd,
7153 relocation - dtpoff_base (info),
7154 globals->root.sgot->contents + off
7155 + GOT_ENTRY_SIZE);
7156 }
7157 else
7158 {
7159 /* This TLS symbol is global. We emit a
7160 relocation to fix up the TLS offset at load
7161 time. */
7162 rela.r_info =
7163 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
7164 rela.r_addend = 0;
7165 rela.r_offset =
7166 (globals->root.sgot->output_section->vma
7167 + globals->root.sgot->output_offset + off
7168 + GOT_ENTRY_SIZE);
7169
7170 loc = globals->root.srelgot->contents;
7171 loc += globals->root.srelgot->reloc_count++
7172 * RELOC_SIZE (globals);
7173 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7174 bfd_put_NN (output_bfd, (bfd_vma) 0,
7175 globals->root.sgot->contents + off
7176 + GOT_ENTRY_SIZE);
7177 }
7178 }
7179 else
7180 {
7181 bfd_put_NN (output_bfd, (bfd_vma) 1,
7182 globals->root.sgot->contents + off);
7183 bfd_put_NN (output_bfd,
7184 relocation - dtpoff_base (info),
7185 globals->root.sgot->contents + off
7186 + GOT_ENTRY_SIZE);
7187 }
7188
7189 symbol_got_offset_mark (input_bfd, h, r_symndx);
7190 }
7191 break;
7192
7193 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7194 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
7195 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7196 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7197 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7198 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7199 {
7200 bool need_relocs = false;
7201 bfd_byte *loc;
7202 int indx;
7203 bfd_vma off;
7204
7205 off = symbol_got_offset (input_bfd, h, r_symndx);
7206
7207 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7208
7209 need_relocs =
7210 (!bfd_link_executable (info) || indx != 0) &&
7211 (h == NULL
7212 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7213 || h->root.type != bfd_link_hash_undefweak);
7214
7215 BFD_ASSERT (globals->root.srelgot != NULL);
7216
7217 if (need_relocs)
7218 {
7219 Elf_Internal_Rela rela;
7220
7221 if (indx == 0)
7222 rela.r_addend = relocation - dtpoff_base (info);
7223 else
7224 rela.r_addend = 0;
7225
7226 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
7227 rela.r_offset = globals->root.sgot->output_section->vma +
7228 globals->root.sgot->output_offset + off;
7229
7230 loc = globals->root.srelgot->contents;
7231 loc += globals->root.srelgot->reloc_count++
7232 * RELOC_SIZE (htab);
7233
7234 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7235
7236 bfd_put_NN (output_bfd, rela.r_addend,
7237 globals->root.sgot->contents + off);
7238 }
7239 else
7240 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
7241 globals->root.sgot->contents + off);
7242
7243 symbol_got_offset_mark (input_bfd, h, r_symndx);
7244 }
7245 break;
7246
7247 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7248 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7249 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7250 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
7251 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7252 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7253 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7254 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
7255 {
7256 bool need_relocs = false;
7257 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
7258 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
7259
7260 need_relocs = (h == NULL
7261 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7262 || h->root.type != bfd_link_hash_undefweak);
7263
7264 BFD_ASSERT (globals->root.srelgot != NULL);
7265 BFD_ASSERT (globals->root.sgot != NULL);
7266
7267 if (need_relocs)
7268 {
7269 bfd_byte *loc;
7270 Elf_Internal_Rela rela;
7271 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
7272
7273 rela.r_addend = 0;
7274 rela.r_offset = (globals->root.sgotplt->output_section->vma
7275 + globals->root.sgotplt->output_offset
7276 + off + globals->sgotplt_jump_table_size);
7277
7278 if (indx == 0)
7279 rela.r_addend = relocation - dtpoff_base (info);
7280
7281 /* Allocate the next available slot in the PLT reloc
7282 section to hold our R_AARCH64_TLSDESC; the next
7283 available slot is determined from reloc_count,
7284 which we step. But note that reloc_count was
7285 artificially moved down while allocating slots for
7286 real PLT relocs such that all of the PLT relocs
7287 will fit above the initial reloc_count and the
7288 extra stuff will fit below. */
7289 loc = globals->root.srelplt->contents;
7290 loc += globals->root.srelplt->reloc_count++
7291 * RELOC_SIZE (globals);
7292
7293 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7294
7295 bfd_put_NN (output_bfd, (bfd_vma) 0,
7296 globals->root.sgotplt->contents + off +
7297 globals->sgotplt_jump_table_size);
7298 bfd_put_NN (output_bfd, (bfd_vma) 0,
7299 globals->root.sgotplt->contents + off +
7300 globals->sgotplt_jump_table_size +
7301 GOT_ENTRY_SIZE);
7302 }
7303
7304 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
7305 }
7306 break;
7307 default:
7308 break;
7309 }
7310
7311 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
7312 because such sections are not SEC_ALLOC and thus ld.so will
7313 not process them. */
7314 if (unresolved_reloc
7315 && !((input_section->flags & SEC_DEBUGGING) != 0
7316 && h->def_dynamic)
7317 && _bfd_elf_section_offset (output_bfd, info, input_section,
7318 +rel->r_offset) != (bfd_vma) - 1)
7319 {
7320 _bfd_error_handler
7321 /* xgettext:c-format */
7322 (_("%pB(%pA+%#" PRIx64 "): "
7323 "unresolvable %s relocation against symbol `%s'"),
7324 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
7325 h->root.root.string);
7326 return false;
7327 }
7328
7329 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
7330 {
7331 bfd_reloc_code_real_type real_r_type
7332 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7333
7334 switch (r)
7335 {
7336 case bfd_reloc_overflow:
7337 (*info->callbacks->reloc_overflow)
7338 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
7339 input_bfd, input_section, rel->r_offset);
7340 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
7341 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
7342 {
7343 (*info->callbacks->warning)
7344 (info,
7345 _("too many GOT entries for -fpic, "
7346 "please recompile with -fPIC"),
7347 name, input_bfd, input_section, rel->r_offset);
7348 return false;
7349 }
7350 /* Overflow can occur when a variable is referenced with a type
7351 that has a larger alignment than the type with which it was
7352 declared. eg:
7353 file1.c: extern int foo; int a (void) { return foo; }
7354 file2.c: char bar, foo, baz;
7355 If the variable is placed into a data section at an offset
7356 that is incompatible with the larger alignment requirement
7357 overflow will occur. (Strictly speaking this is not overflow
7358 but rather an alignment problem, but the bfd_reloc_ error
7359 enum does not have a value to cover that situation).
7360
7361 Try to catch this situation here and provide a more helpful
7362 error message to the user. */
7363 if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
7364 /* FIXME: Are we testing all of the appropriate reloc
7365 types here ? */
7366 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
7367 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
7368 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
7369 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
7370 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
7371 {
7372 info->callbacks->warning
7373 (info, _("one possible cause of this error is that the \
7374 symbol is being referenced in the indicated code as if it had a larger \
7375 alignment than was declared where it was defined"),
7376 name, input_bfd, input_section, rel->r_offset);
7377 }
7378 break;
7379
7380 case bfd_reloc_undefined:
7381 (*info->callbacks->undefined_symbol)
7382 (info, name, input_bfd, input_section, rel->r_offset, true);
7383 break;
7384
7385 case bfd_reloc_outofrange:
7386 error_message = _("out of range");
7387 goto common_error;
7388
7389 case bfd_reloc_notsupported:
7390 error_message = _("unsupported relocation");
7391 goto common_error;
7392
7393 case bfd_reloc_dangerous:
7394 /* error_message should already be set. */
7395 goto common_error;
7396
7397 default:
7398 error_message = _("unknown error");
7399 /* Fall through. */
7400
7401 common_error:
7402 BFD_ASSERT (error_message != NULL);
7403 (*info->callbacks->reloc_dangerous)
7404 (info, error_message, input_bfd, input_section, rel->r_offset);
7405 break;
7406 }
7407 }
7408
7409 if (!save_addend)
7410 addend = 0;
7411 }
7412
7413 return true;
7414 }
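
/* Editor's illustration (not part of BFD): the TLS general-dynamic handling
   in elfNN_aarch64_relocate_section above fills a double GOT entry -- the
   module ID (DTPMOD, or the constant 1 when no dynamic relocs are needed)
   goes in the first slot, and the DTPREL offset goes in the second slot, one
   GOT_ENTRY_SIZE later.  A minimal sketch of where the two slots live; the
   struct and function names are invented.  */

struct example_tls_gd_slots
{
  bfd_vma dtpmod_addr;		/* First slot: module ID / DTPMOD reloc.  */
  bfd_vma dtprel_addr;		/* Second slot: offset / DTPREL reloc.  */
};

static struct example_tls_gd_slots ATTRIBUTE_UNUSED
example_tls_gd_slot_addresses (bfd_vma got_base, bfd_vma off)
{
  struct example_tls_gd_slots slots;
  slots.dtpmod_addr = got_base + off;
  slots.dtprel_addr = got_base + off + GOT_ENTRY_SIZE;
  return slots;
}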
7415
7416 /* Set the right machine number. */
7417
7418 static bool
7419 elfNN_aarch64_object_p (bfd *abfd)
7420 {
7421 #if ARCH_SIZE == 32
7422 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7423 #else
7424 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7425 #endif
7426 return true;
7427 }
7428
7429 /* Function to keep AArch64 specific flags in the ELF header. */
7430
7431 static bool
7432 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7433 {
7434 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7435 {
7436 }
7437 else
7438 {
7439 elf_elfheader (abfd)->e_flags = flags;
7440 elf_flags_init (abfd) = true;
7441 }
7442
7443 return true;
7444 }
7445
7446 /* Merge backend specific data from an object file to the output
7447 object file when linking. */
7448
7449 static bool
7450 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7451 {
7452 bfd *obfd = info->output_bfd;
7453 flagword out_flags;
7454 flagword in_flags;
7455 bool flags_compatible = true;
7456 asection *sec;
7457
7458 /* Check if we have the same endianness. */
7459 if (!_bfd_generic_verify_endian_match (ibfd, info))
7460 return false;
7461
7462 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7463 return true;
7464
7465 /* The input BFD must have had its flags initialised. */
7466 /* The following seems bogus to me -- The flags are initialized in
7467 the assembler but I don't think an elf_flags_init field is
7468 written into the object. */
7469 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7470
7471 in_flags = elf_elfheader (ibfd)->e_flags;
7472 out_flags = elf_elfheader (obfd)->e_flags;
7473
7474 if (!elf_flags_init (obfd))
7475 {
7476 /* If the input is the default architecture and had the default
7477 flags then do not bother setting the flags for the output
7478 architecture; instead allow future merges to do this. If no
7479 future merges ever set these flags then they will retain their
7480 uninitialised values which, surprise surprise, correspond
7481 to the default values. */
7482 if (bfd_get_arch_info (ibfd)->the_default
7483 && elf_elfheader (ibfd)->e_flags == 0)
7484 return true;
7485
7486 elf_flags_init (obfd) = true;
7487 elf_elfheader (obfd)->e_flags = in_flags;
7488
7489 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7490 && bfd_get_arch_info (obfd)->the_default)
7491 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7492 bfd_get_mach (ibfd));
7493
7494 return true;
7495 }
7496
7497 /* Identical flags must be compatible. */
7498 if (in_flags == out_flags)
7499 return true;
7500
7501 /* Check to see if the input BFD actually contains any sections. If
7502 not, its flags may not have been initialised either, but it
7503 cannot actually cause any incompatibility. Do not short-circuit
7504 dynamic objects; their section list may be emptied by
7505 elf_link_add_object_symbols.
7506
7507 Also check to see if there are no code sections in the input.
7508 In this case there is no need to check for code-specific flags.
7509 XXX - do we need to worry about floating-point format compatibility
7510 in data sections ? */
7511 if (!(ibfd->flags & DYNAMIC))
7512 {
7513 bool null_input_bfd = true;
7514 bool only_data_sections = true;
7515
7516 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
7517 {
7518 if ((bfd_section_flags (sec)
7519 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7520 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7521 only_data_sections = false;
7522
7523 null_input_bfd = false;
7524 break;
7525 }
7526
7527 if (null_input_bfd || only_data_sections)
7528 return true;
7529 }
7530
7531 return flags_compatible;
7532 }
7533
7534 /* Display the flags field. */
7535
7536 static bool
7537 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
7538 {
7539 FILE *file = (FILE *) ptr;
7540 unsigned long flags;
7541
7542 BFD_ASSERT (abfd != NULL && ptr != NULL);
7543
7544 /* Print normal ELF private data. */
7545 _bfd_elf_print_private_bfd_data (abfd, ptr);
7546
7547 flags = elf_elfheader (abfd)->e_flags;
7548 /* Ignore init flag - it may not be set, despite the flags field
7549 containing valid data. */
7550
7551 /* xgettext:c-format */
7552 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
7553
7554 if (flags)
7555 fprintf (file, _(" <Unrecognised flag bits set>"));
7556
7557 fputc ('\n', file);
7558
7559 return true;
7560 }
7561
7562 /* Return true if we need copy relocation against EH. */
7563
7564 static bool
7565 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
7566 {
7567 struct elf_dyn_relocs *p;
7568 asection *s;
7569
7570 for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
7571 {
7572 /* If there is any pc-relative reference, we need to keep the copy
7573 relocation to avoid propagating the relocation into the runtime,
7574 which current glibc does not support. */
7575 if (p->pc_count)
7576 return true;
7577
7578 s = p->sec->output_section;
7579 /* Need a copy relocation if it's against a read-only section. */
7580 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7581 return true;
7582 }
7583
7584 return false;
7585 }
7586
7587 /* Adjust a symbol defined by a dynamic object and referenced by a
7588 regular object. The current definition is in some section of the
7589 dynamic object, but we're not including those sections. We have to
7590 change the definition to something the rest of the link can
7591 understand. */
7592
7593 static bool
7594 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
7595 struct elf_link_hash_entry *h)
7596 {
7597 struct elf_aarch64_link_hash_table *htab;
7598 asection *s, *srel;
7599
7600 /* If this is a function, put it in the procedure linkage table. We
7601 will fill in the contents of the procedure linkage table later,
7602 when we know the address of the .got section. */
7603 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
7604 {
7605 if (h->plt.refcount <= 0
7606 || (h->type != STT_GNU_IFUNC
7607 && (SYMBOL_CALLS_LOCAL (info, h)
7608 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
7609 && h->root.type == bfd_link_hash_undefweak))))
7610 {
7611 /* This case can occur if we saw a CALL26 reloc in
7612 an input file, but the symbol wasn't referred to
7613 by a dynamic object or all references were
7614 garbage collected. In that case we can resolve the
7615 call directly and do not need a PLT entry. */
7616 h->plt.offset = (bfd_vma) - 1;
7617 h->needs_plt = 0;
7618 }
7619
7620 return true;
7621 }
7622 else
7623 /* Otherwise, reset to -1. */
7624 h->plt.offset = (bfd_vma) - 1;
7625
7626
7627 /* If this is a weak symbol, and there is a real definition, the
7628 processor independent code will have arranged for us to see the
7629 real definition first, and we can just use the same value. */
7630 if (h->is_weakalias)
7631 {
7632 struct elf_link_hash_entry *def = weakdef (h);
7633 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
7634 h->root.u.def.section = def->root.u.def.section;
7635 h->root.u.def.value = def->root.u.def.value;
7636 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
7637 h->non_got_ref = def->non_got_ref;
7638 return true;
7639 }
7640
7641 /* If we are creating a shared library, we must presume that the
7642 only references to the symbol are via the global offset table.
7643 For such cases we need not do anything here; the relocations will
7644 be handled correctly by relocate_section. */
7645 if (bfd_link_pic (info))
7646 return true;
7647
7648 /* If there are no references to this symbol that do not use the
7649 GOT, we don't need to generate a copy reloc. */
7650 if (!h->non_got_ref)
7651 return true;
7652
7653 /* If -z nocopyreloc was given, we won't generate them either. */
7654 if (info->nocopyreloc)
7655 {
7656 h->non_got_ref = 0;
7657 return true;
7658 }
7659
7660 if (ELIMINATE_COPY_RELOCS)
7661 {
7662 struct elf_aarch64_link_hash_entry *eh;
7663 /* If we don't find any dynamic relocs in read-only sections, then
7664 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
7665 eh = (struct elf_aarch64_link_hash_entry *) h;
7666 if (!need_copy_relocation_p (eh))
7667 {
7668 h->non_got_ref = 0;
7669 return true;
7670 }
7671 }
7672
7673 /* We must allocate the symbol in our .dynbss section, which will
7674 become part of the .bss section of the executable. There will be
7675 an entry for this symbol in the .dynsym section. The dynamic
7676 object will contain position independent code, so all references
7677 from the dynamic object to this symbol will go through the global
7678 offset table. The dynamic linker will use the .dynsym entry to
7679 determine the address it must put in the global offset table, so
7680 both the dynamic object and the regular object will refer to the
7681 same memory location for the variable. */
7682
7683 htab = elf_aarch64_hash_table (info);
7684
7685 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
7686 to copy the initial value out of the dynamic object and into the
7687 runtime process image. */
7688 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
7689 {
7690 s = htab->root.sdynrelro;
7691 srel = htab->root.sreldynrelro;
7692 }
7693 else
7694 {
7695 s = htab->root.sdynbss;
7696 srel = htab->root.srelbss;
7697 }
7698 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
7699 {
7700 srel->size += RELOC_SIZE (htab);
7701 h->needs_copy = 1;
7702 }
7703
7704 return _bfd_elf_adjust_dynamic_copy (info, h, s);
7705
7706 }
7707
7708 static bool
7709 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
7710 {
7711 struct elf_aarch64_local_symbol *locals;
7712 locals = elf_aarch64_locals (abfd);
7713 if (locals == NULL)
7714 {
7715 locals = (struct elf_aarch64_local_symbol *)
7716 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
7717 if (locals == NULL)
7718 return false;
7719 elf_aarch64_locals (abfd) = locals;
7720 }
7721 return true;
7722 }
7723
7724 /* Create the .got section to hold the global offset table. */
7725
7726 static bool
7727 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
7728 {
7729 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
7730 flagword flags;
7731 asection *s;
7732 struct elf_link_hash_entry *h;
7733 struct elf_link_hash_table *htab = elf_hash_table (info);
7734
7735 /* This function may be called more than once. */
7736 if (htab->sgot != NULL)
7737 return true;
7738
7739 flags = bed->dynamic_sec_flags;
7740
7741 s = bfd_make_section_anyway_with_flags (abfd,
7742 (bed->rela_plts_and_copies_p
7743 ? ".rela.got" : ".rel.got"),
7744 (bed->dynamic_sec_flags
7745 | SEC_READONLY));
7746 if (s == NULL
7747 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7748 return false;
7749 htab->srelgot = s;
7750
7751 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7752 if (s == NULL
7753 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7754 return false;
7755 htab->sgot = s;
7756 htab->sgot->size += GOT_ENTRY_SIZE;
7757
7758 if (bed->want_got_sym)
7759 {
7760 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7761 (or .got.plt) section. We don't do this in the linker script
7762 because we don't want to define the symbol if we are not creating
7763 a global offset table. */
7764 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7765 "_GLOBAL_OFFSET_TABLE_");
7766 elf_hash_table (info)->hgot = h;
7767 if (h == NULL)
7768 return false;
7769 }
7770
7771 if (bed->want_got_plt)
7772 {
7773 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7774 if (s == NULL
7775 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7776 return false;
7777 htab->sgotplt = s;
7778 }
7779
7780 /* The first bit of the global offset table is the header. */
7781 s->size += bed->got_header_size;
7782
7783 return true;
7784 }
7785
7786 /* Look through the relocs for a section during the first phase. */
7787
7788 static bool
7789 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7790 asection *sec, const Elf_Internal_Rela *relocs)
7791 {
7792 Elf_Internal_Shdr *symtab_hdr;
7793 struct elf_link_hash_entry **sym_hashes;
7794 const Elf_Internal_Rela *rel;
7795 const Elf_Internal_Rela *rel_end;
7796 asection *sreloc;
7797
7798 struct elf_aarch64_link_hash_table *htab;
7799
7800 if (bfd_link_relocatable (info))
7801 return true;
7802
7803 BFD_ASSERT (is_aarch64_elf (abfd));
7804
7805 htab = elf_aarch64_hash_table (info);
7806 sreloc = NULL;
7807
7808 symtab_hdr = &elf_symtab_hdr (abfd);
7809 sym_hashes = elf_sym_hashes (abfd);
7810
7811 rel_end = relocs + sec->reloc_count;
7812 for (rel = relocs; rel < rel_end; rel++)
7813 {
7814 struct elf_link_hash_entry *h;
7815 unsigned int r_symndx;
7816 unsigned int r_type;
7817 bfd_reloc_code_real_type bfd_r_type;
7818 Elf_Internal_Sym *isym;
7819
7820 r_symndx = ELFNN_R_SYM (rel->r_info);
7821 r_type = ELFNN_R_TYPE (rel->r_info);
7822
7823 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7824 {
7825 /* xgettext:c-format */
7826 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
7827 return false;
7828 }
7829
7830 if (r_symndx < symtab_hdr->sh_info)
7831 {
7832 /* A local symbol. */
7833 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
7834 abfd, r_symndx);
7835 if (isym == NULL)
7836 return false;
7837
7838 /* Check relocation against local STT_GNU_IFUNC symbol. */
7839 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7840 {
7841 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7842 true);
7843 if (h == NULL)
7844 return false;
7845
7846 /* Fake a STT_GNU_IFUNC symbol. */
7847 h->type = STT_GNU_IFUNC;
7848 h->def_regular = 1;
7849 h->ref_regular = 1;
7850 h->forced_local = 1;
7851 h->root.type = bfd_link_hash_defined;
7852 }
7853 else
7854 h = NULL;
7855 }
7856 else
7857 {
7858 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7859 while (h->root.type == bfd_link_hash_indirect
7860 || h->root.type == bfd_link_hash_warning)
7861 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7862 }
7863
7864 /* Could be done earlier, if h were already available. */
7865 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7866
7867 if (h != NULL)
7868 {
7869 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7870 This shows up in particular in an R_AARCH64_PREL64 in the large model
7871 when calculating the pc-relative address of the .got section, which is
7872 used to initialize the gp register. */
7873 if (h->root.root.string
7874 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7875 {
7876 if (htab->root.dynobj == NULL)
7877 htab->root.dynobj = abfd;
7878
7879 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7880 return false;
7881
7882 BFD_ASSERT (h == htab->root.hgot);
7883 }
7884
7885 /* Create the ifunc sections for static executables. If we
7886 never see an indirect function symbol and are not building
7887 a static executable, those sections will be empty and
7888 won't appear in the output. */
7889 switch (bfd_r_type)
7890 {
7891 default:
7892 break;
7893
7894 case BFD_RELOC_AARCH64_ADD_LO12:
7895 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7896 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7897 case BFD_RELOC_AARCH64_CALL26:
7898 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7899 case BFD_RELOC_AARCH64_JUMP26:
7900 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7901 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7902 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7903 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7904 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7905 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7906 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7907 case BFD_RELOC_AARCH64_NN:
7908 if (htab->root.dynobj == NULL)
7909 htab->root.dynobj = abfd;
7910 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7911 return false;
7912 break;
7913 }
7914
7915 /* It is referenced by a non-shared object. */
7916 h->ref_regular = 1;
7917 }
7918
7919 switch (bfd_r_type)
7920 {
7921 case BFD_RELOC_AARCH64_16:
7922 #if ARCH_SIZE == 64
7923 case BFD_RELOC_AARCH64_32:
7924 #endif
7925 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
7926 {
7927 if (h != NULL
7928 /* This is an absolute symbol. It represents a value instead
7929 of an address. */
7930 && (bfd_is_abs_symbol (&h->root)
7931 /* This is an undefined symbol. */
7932 || h->root.type == bfd_link_hash_undefined))
7933 break;
7934
7935 /* For local symbols and global symbols defined in a non-ABS section,
7936 it is assumed that the value is an address. */
7937 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7938 _bfd_error_handler
7939 /* xgettext:c-format */
7940 (_("%pB: relocation %s against `%s' can not be used when making "
7941 "a shared object"),
7942 abfd, elfNN_aarch64_howto_table[howto_index].name,
7943 (h) ? h->root.root.string : "a local symbol");
7944 bfd_set_error (bfd_error_bad_value);
7945 return false;
7946 }
7947 else
7948 break;
7949
7950 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7951 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7952 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7953 case BFD_RELOC_AARCH64_MOVW_G3:
7954 if (bfd_link_pic (info))
7955 {
7956 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7957 _bfd_error_handler
7958 /* xgettext:c-format */
7959 (_("%pB: relocation %s against `%s' can not be used when making "
7960 "a shared object; recompile with -fPIC"),
7961 abfd, elfNN_aarch64_howto_table[howto_index].name,
7962 (h) ? h->root.root.string : "a local symbol");
7963 bfd_set_error (bfd_error_bad_value);
7964 return false;
7965 }
7966 /* Fall through. */
7967
7968 case BFD_RELOC_AARCH64_16_PCREL:
7969 case BFD_RELOC_AARCH64_32_PCREL:
7970 case BFD_RELOC_AARCH64_64_PCREL:
7971 case BFD_RELOC_AARCH64_ADD_LO12:
7972 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7973 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7974 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7975 case BFD_RELOC_AARCH64_LDST128_LO12:
7976 case BFD_RELOC_AARCH64_LDST16_LO12:
7977 case BFD_RELOC_AARCH64_LDST32_LO12:
7978 case BFD_RELOC_AARCH64_LDST64_LO12:
7979 case BFD_RELOC_AARCH64_LDST8_LO12:
7980 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7981 if (h == NULL || bfd_link_pic (info))
7982 break;
7983 /* Fall through. */
7984
7985 case BFD_RELOC_AARCH64_NN:
7986
7987 /* We don't need to handle relocs into sections not going into
7988 the "real" output. */
7989 if ((sec->flags & SEC_ALLOC) == 0)
7990 break;
7991
7992 if (h != NULL)
7993 {
7994 if (!bfd_link_pic (info))
7995 h->non_got_ref = 1;
7996
7997 h->plt.refcount += 1;
7998 h->pointer_equality_needed = 1;
7999 }
8000
8001 /* No need to do anything if we're not creating a shared
8002 object. */
8003 if (!(bfd_link_pic (info)
8004 /* If on the other hand, we are creating an executable, we
8005 may need to keep relocations for symbols satisfied by a
8006 dynamic library if we manage to avoid copy relocs for the
8007 symbol.
8008
8009 NOTE: Currently, there is no support for copy reloc
8010 elimination on pc-relative relocation types, because there is
8011 no dynamic relocation support for them in glibc. We still
8012 record the dynamic symbol reference for them. This is
8013 because one symbol may be referenced by both absolute
8014 relocation (for example, BFD_RELOC_AARCH64_NN) and
8015 pc-relative relocation. We need full symbol reference
8016 information to make correct decision later in
8017 elfNN_aarch64_adjust_dynamic_symbol. */
8018 || (ELIMINATE_COPY_RELOCS
8019 && !bfd_link_pic (info)
8020 && h != NULL
8021 && (h->root.type == bfd_link_hash_defweak
8022 || !h->def_regular))))
8023 break;
8024
8025 {
8026 struct elf_dyn_relocs *p;
8027 struct elf_dyn_relocs **head;
8028 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
8029
8030 /* We must copy these reloc types into the output file.
8031 Create a reloc section in dynobj and make room for
8032 this reloc. */
8033 if (sreloc == NULL)
8034 {
8035 if (htab->root.dynobj == NULL)
8036 htab->root.dynobj = abfd;
8037
8038 sreloc = _bfd_elf_make_dynamic_reloc_section
8039 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
8040
8041 if (sreloc == NULL)
8042 return false;
8043 }
8044
8045 /* If this is a global symbol, we count the number of
8046 relocations we need for this symbol. */
8047 if (h != NULL)
8048 {
8049 head = &h->dyn_relocs;
8050 }
8051 else
8052 {
8053 /* Track dynamic relocs needed for local syms too.
8054 We really need local syms available to do this
8055 easily. Oh well. */
8056
8057 asection *s;
8058 void **vpp;
8059
8060 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
8061 abfd, r_symndx);
8062 if (isym == NULL)
8063 return false;
8064
8065 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
8066 if (s == NULL)
8067 s = sec;
8068
8069 /* Beware of type punned pointers vs strict aliasing
8070 rules. */
8071 vpp = &(elf_section_data (s)->local_dynrel);
8072 head = (struct elf_dyn_relocs **) vpp;
8073 }
8074
8075 p = *head;
8076 if (p == NULL || p->sec != sec)
8077 {
8078 size_t amt = sizeof *p;
8079 p = ((struct elf_dyn_relocs *)
8080 bfd_zalloc (htab->root.dynobj, amt));
8081 if (p == NULL)
8082 return false;
8083 p->next = *head;
8084 *head = p;
8085 p->sec = sec;
8086 }
8087
8088 p->count += 1;
8089
8090 if (elfNN_aarch64_howto_table[howto_index].pc_relative)
8091 p->pc_count += 1;
8092 }
8093 break;
8094
8095 /* RR: We probably want to keep a consistency check that
8096 there are no dangling GOT_PAGE relocs. */
8097 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8098 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8099 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8100 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8101 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8102 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8103 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8104 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8105 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8106 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8107 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8108 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8109 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8110 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8111 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8112 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8113 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8114 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8115 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8116 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8117 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8118 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8119 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8120 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8121 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8122 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8123 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8124 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8125 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8126 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8127 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8128 {
8129 unsigned got_type;
8130 unsigned old_got_type;
8131
8132 got_type = aarch64_reloc_got_type (bfd_r_type);
8133
8134 if (h)
8135 {
8136 h->got.refcount += 1;
8137 old_got_type = elf_aarch64_hash_entry (h)->got_type;
8138 }
8139 else
8140 {
8141 struct elf_aarch64_local_symbol *locals;
8142
8143 if (!elfNN_aarch64_allocate_local_symbols
8144 (abfd, symtab_hdr->sh_info))
8145 return false;
8146
8147 locals = elf_aarch64_locals (abfd);
8148 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8149 locals[r_symndx].got_refcount += 1;
8150 old_got_type = locals[r_symndx].got_type;
8151 }
8152
8153 /* If a variable is accessed with both general dynamic TLS
8154 methods, two slots may be created. */
8155 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
8156 got_type |= old_got_type;
8157
8158 /* We will already have issued an error message if there
8159 is a TLS/non-TLS mismatch, based on the symbol type.
8160 So just combine any TLS types needed. */
8161 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
8162 && got_type != GOT_NORMAL)
8163 got_type |= old_got_type;
8164
8165 /* If the symbol is accessed by both IE and GD methods, we
8166 are able to relax. Turn off the GD flag, without
8167 messing up any other kind of TLS type that may be
8168 involved. */
8169 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
8170 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
8171
8172 if (old_got_type != got_type)
8173 {
8174 if (h != NULL)
8175 elf_aarch64_hash_entry (h)->got_type = got_type;
8176 else
8177 {
8178 struct elf_aarch64_local_symbol *locals;
8179 locals = elf_aarch64_locals (abfd);
8180 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8181 locals[r_symndx].got_type = got_type;
8182 }
8183 }
8184
8185 if (htab->root.dynobj == NULL)
8186 htab->root.dynobj = abfd;
8187 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
8188 return false;
8189 break;
8190 }
8191
8192 case BFD_RELOC_AARCH64_CALL26:
8193 case BFD_RELOC_AARCH64_JUMP26:
8194 /* If this is a local symbol then we resolve it
8195 directly without creating a PLT entry. */
8196 if (h == NULL)
8197 continue;
8198
8199 h->needs_plt = 1;
8200 if (h->plt.refcount <= 0)
8201 h->plt.refcount = 1;
8202 else
8203 h->plt.refcount += 1;
8204 break;
8205
8206 default:
8207 break;
8208 }
8209 }
8210
8211 return true;
8212 }
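
/* Editor's illustration (not part of BFD): check_relocs above keeps one
   bit-set of GOT access types per symbol and, when a symbol is reached via
   both the initial-exec and a general-dynamic style access, drops the GD
   bits so only the IE slot is kept.  A minimal sketch of that merge, reusing
   the GOT_* flags and GOT_TLS_GD_ANY_P from earlier in this file; the helper
   name is invented.  */

static unsigned int ATTRIBUTE_UNUSED
example_merge_got_type (unsigned int old_got_type, unsigned int new_got_type)
{
  unsigned int got_type = new_got_type;

  /* A variable accessed with both general dynamic methods may need both
     kinds of slots.  */
  if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
    got_type |= old_got_type;

  /* Otherwise just accumulate whatever TLS access types were already
     recorded.  */
  if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
      && got_type != GOT_NORMAL)
    got_type |= old_got_type;

  /* IE wins over GD: once the symbol can use the initial-exec model, the
     general-dynamic slots can be relaxed away.  */
  if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
    got_type &= ~(GOT_TLSDESC_GD | GOT_TLS_GD);

  return got_type;
}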
8213
8214 /* Treat mapping symbols as special target symbols. */
8215
8216 static bool
8217 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
8218 asymbol *sym)
8219 {
8220 return bfd_is_aarch64_special_symbol_name (sym->name,
8221 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
8222 }
8223
8224 /* If the ELF symbol SYM might be a function in SEC, return the
8225 function size and set *CODE_OFF to the function's entry point,
8226 otherwise return zero. */
8227
8228 static bfd_size_type
8229 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
8230 bfd_vma *code_off)
8231 {
8232 bfd_size_type size;
8233 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
8234
8235 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
8236 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
8237 || sym->section != sec)
8238 return 0;
8239
8240 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
8241
8242 if (!(sym->flags & BSF_SYNTHETIC))
8243 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
8244 {
8245 case STT_NOTYPE:
8246 /* Ignore symbols created by the annobin plugin for gcc and clang.
8247 These symbols are hidden, local, notype and have a size of 0. */
8248 if (size == 0
8249 && sym->flags & BSF_LOCAL
8250 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
8251 return 0;
8252 /* Fall through. */
8253 case STT_FUNC:
8254 /* FIXME: Allow STT_GNU_IFUNC as well ? */
8255 break;
8256 default:
8257 return 0;
8258 }
8259
8260 if ((sym->flags & BSF_LOCAL)
8261 && bfd_is_aarch64_special_symbol_name (sym->name,
8262 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
8263 return 0;
8264
8265 *code_off = sym->value;
8266
8267 /* Do not return 0 for the function's size. */
8268 return size ? size : 1;
8269 }
8270
8271 static bool
8272 elfNN_aarch64_find_inliner_info (bfd *abfd,
8273 const char **filename_ptr,
8274 const char **functionname_ptr,
8275 unsigned int *line_ptr)
8276 {
8277 bool found;
8278 found = _bfd_dwarf2_find_inliner_info
8279 (abfd, filename_ptr,
8280 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
8281 return found;
8282 }
8283
8284
8285 static bool
8286 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
8287 {
8288 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
8289
8290 if (!_bfd_elf_init_file_header (abfd, link_info))
8291 return false;
8292
8293 i_ehdrp = elf_elfheader (abfd);
8294 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
8295 return true;
8296 }
8297
8298 static enum elf_reloc_type_class
8299 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
8300 const asection *rel_sec ATTRIBUTE_UNUSED,
8301 const Elf_Internal_Rela *rela)
8302 {
8303 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
8304
8305 if (htab->root.dynsym != NULL
8306 && htab->root.dynsym->contents != NULL)
8307 {
8308 /* Check relocation against STT_GNU_IFUNC symbol if there are
8309 dynamic symbols. */
8310 bfd *abfd = info->output_bfd;
8311 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
8312 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
8313 if (r_symndx != STN_UNDEF)
8314 {
8315 Elf_Internal_Sym sym;
8316 if (!bed->s->swap_symbol_in (abfd,
8317 (htab->root.dynsym->contents
8318 + r_symndx * bed->s->sizeof_sym),
8319 0, &sym))
8320 {
8321 /* xgettext:c-format */
8322 _bfd_error_handler (_("%pB symbol number %lu references"
8323 " nonexistent SHT_SYMTAB_SHNDX section"),
8324 abfd, r_symndx);
8325 /* Ideally an error class should be returned here. */
8326 }
8327 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
8328 return reloc_class_ifunc;
8329 }
8330 }
8331
8332 switch ((int) ELFNN_R_TYPE (rela->r_info))
8333 {
8334 case AARCH64_R (IRELATIVE):
8335 return reloc_class_ifunc;
8336 case AARCH64_R (RELATIVE):
8337 return reloc_class_relative;
8338 case AARCH64_R (JUMP_SLOT):
8339 return reloc_class_plt;
8340 case AARCH64_R (COPY):
8341 return reloc_class_copy;
8342 default:
8343 return reloc_class_normal;
8344 }
8345 }
8346
8347 /* Handle an AArch64 specific section when reading an object file. This is
8348 called when bfd_section_from_shdr finds a section with an unknown
8349 type. */
8350
8351 static bool
8352 elfNN_aarch64_section_from_shdr (bfd *abfd,
8353 Elf_Internal_Shdr *hdr,
8354 const char *name, int shindex)
8355 {
8356 /* There ought to be a place to keep ELF backend specific flags, but
8357 at the moment there isn't one. We just keep track of the
8358 sections by their name, instead. Fortunately, the ABI gives
8359 names for all the AArch64 specific sections, so we will probably get
8360 away with this. */
8361 switch (hdr->sh_type)
8362 {
8363 case SHT_AARCH64_ATTRIBUTES:
8364 break;
8365
8366 default:
8367 return false;
8368 }
8369
8370 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
8371 return false;
8372
8373 return true;
8374 }
8375
8376 /* Process any AArch64-specific program segment types. */
8377
8378 static bool
8379 elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
8380 Elf_Internal_Phdr *hdr,
8381 int hdr_index ATTRIBUTE_UNUSED,
8382 const char *name ATTRIBUTE_UNUSED)
8383 {
8384 /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type. */
8385 if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
8386 return false;
8387
8388 if (hdr->p_filesz > 0)
8389 {
8390 /* Sections created from memory tag p_type's are always named
8391 "memtag". This makes it easier for tools (for example, GDB)
8392 to find them. */
8393 asection *newsect = bfd_make_section_anyway (abfd, "memtag");
8394
8395 if (newsect == NULL)
8396 return false;
8397
8398 unsigned int opb = bfd_octets_per_byte (abfd, NULL);
8399
8400 /* p_vaddr holds the original start address of the tagged memory
8401 range. */
8402 newsect->vma = hdr->p_vaddr / opb;
8403
8404 /* p_filesz holds the storage size of the packed tags. */
8405 newsect->size = hdr->p_filesz;
8406 newsect->filepos = hdr->p_offset;
8407
8408 /* p_memsz holds the size of the memory range that contains tags. The
8409 section's rawsize field is reused for this purpose. */
8410 newsect->rawsize = hdr->p_memsz;
8411
8412 /* Make sure the section's flags has SEC_HAS_CONTENTS set, otherwise
8413 BFD will return all zeroes when attempting to get contents from this
8414 section. */
8415 newsect->flags |= SEC_HAS_CONTENTS;
8416 }
8417
8418 return true;
8419 }
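
/* Editor's illustration (not part of BFD): the "memtag" section created
   above reuses asection fields in a non-obvious way -- vma is the start of
   the tagged memory range (p_vaddr), rawsize is the size of that range
   (p_memsz), while size and filepos describe the packed tag data stored in
   the core file (p_filesz / p_offset).  A minimal sketch of reading those
   fields back; the function name is invented and this is not a BFD API.  */

static void ATTRIBUTE_UNUSED
example_describe_memtag_section (const asection *sec,
				 bfd_vma *tagged_start,
				 bfd_size_type *tagged_size,
				 bfd_size_type *packed_tag_bytes)
{
  *tagged_start = sec->vma;	  /* From p_vaddr.  */
  *tagged_size = sec->rawsize;	  /* From p_memsz.  */
  *packed_tag_bytes = sec->size;  /* From p_filesz.  */
}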
8420
8421 /* Implements the bfd_elf_modify_headers hook for aarch64. */
8422
8423 static bool
8424 elfNN_aarch64_modify_headers (bfd *abfd,
8425 struct bfd_link_info *info)
8426 {
8427 struct elf_segment_map *m;
8428 unsigned int segment_count = 0;
8429 Elf_Internal_Phdr *p;
8430
8431 for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
8432 {
8433 /* We are only interested in the memory tag segment that will be dumped
8434 to a core file. If we have no memory tags or we are not dealing with
8435 a core file, just skip this segment. */
8436 if (m->p_type != PT_AARCH64_MEMTAG_MTE
8437 || bfd_get_format (abfd) != bfd_core)
8438 continue;
8439
8440 /* For memory tag segments in core files, the size of the file contents
8441 is smaller than the size of the memory range. Adjust the memory size
8442 accordingly. The real memory size is held in the section's rawsize
8443 field. */
8444 if (m->count > 0)
8445 {
8446 p = elf_tdata (abfd)->phdr;
8447 p += m->idx;
8448 p->p_memsz = m->sections[0]->rawsize;
8449 p->p_flags = 0;
8450 p->p_paddr = 0;
8451 p->p_align = 0;
8452 }
8453 }
8454
8455 /* Give the generic code a chance to handle the headers. */
8456 return _bfd_elf_modify_headers (abfd, info);
8457 }
8458
8459 /* A structure used to record a list of sections, independently
8460 of the next and prev fields in the asection structure. */
8461 typedef struct section_list
8462 {
8463 asection *sec;
8464 struct section_list *next;
8465 struct section_list *prev;
8466 }
8467 section_list;
8468
8469 /* Unfortunately we need to keep a list of sections for which
8470 an _aarch64_elf_section_data structure has been allocated. This
8471 is because it is possible for functions like elfNN_aarch64_write_section
8472 to be called on a section which has had an elf_data_structure
8473 allocated for it (and so the used_by_bfd field is valid) but
8474 for which the AArch64 extended version of this structure - the
8475 _aarch64_elf_section_data structure - has not been allocated. */
8476 static section_list *sections_with_aarch64_elf_section_data = NULL;
8477
8478 static void
8479 record_section_with_aarch64_elf_section_data (asection *sec)
8480 {
8481 struct section_list *entry;
8482
8483 entry = bfd_malloc (sizeof (*entry));
8484 if (entry == NULL)
8485 return;
8486 entry->sec = sec;
8487 entry->next = sections_with_aarch64_elf_section_data;
8488 entry->prev = NULL;
8489 if (entry->next != NULL)
8490 entry->next->prev = entry;
8491 sections_with_aarch64_elf_section_data = entry;
8492 }
8493
8494 static struct section_list *
8495 find_aarch64_elf_section_entry (asection *sec)
8496 {
8497 struct section_list *entry;
8498 static struct section_list *last_entry = NULL;
8499
8500 /* This is a short cut for the typical case where the sections are added
8501 to the sections_with_aarch64_elf_section_data list in forward order and
8502 then looked up here in backwards order. This makes a real difference
8503 to the ld-srec/sec64k.exp linker test. */
8504 entry = sections_with_aarch64_elf_section_data;
8505 if (last_entry != NULL)
8506 {
8507 if (last_entry->sec == sec)
8508 entry = last_entry;
8509 else if (last_entry->next != NULL && last_entry->next->sec == sec)
8510 entry = last_entry->next;
8511 }
8512
8513 for (; entry; entry = entry->next)
8514 if (entry->sec == sec)
8515 break;
8516
8517 if (entry)
8518 /* Record the entry prior to this one - it is the entry we are
8519 most likely to want to locate next time. Also this way if we
8520 have been called from
8521 unrecord_section_with_aarch64_elf_section_data () we will not
8522 be caching a pointer that is about to be freed. */
8523 last_entry = entry->prev;
8524
8525 return entry;
8526 }
8527
8528 static void
8529 unrecord_section_with_aarch64_elf_section_data (asection *sec)
8530 {
8531 struct section_list *entry;
8532
8533 entry = find_aarch64_elf_section_entry (sec);
8534
8535 if (entry)
8536 {
8537 if (entry->prev != NULL)
8538 entry->prev->next = entry->next;
8539 if (entry->next != NULL)
8540 entry->next->prev = entry->prev;
8541 if (entry == sections_with_aarch64_elf_section_data)
8542 sections_with_aarch64_elf_section_data = entry->next;
8543 free (entry);
8544 }
8545 }
8546
8547
8548 typedef struct
8549 {
8550 void *finfo;
8551 struct bfd_link_info *info;
8552 asection *sec;
8553 int sec_shndx;
8554 int (*func) (void *, const char *, Elf_Internal_Sym *,
8555 asection *, struct elf_link_hash_entry *);
8556 } output_arch_syminfo;
8557
8558 enum map_symbol_type
8559 {
8560 AARCH64_MAP_INSN,
8561 AARCH64_MAP_DATA
8562 };
8563
8564
8565 /* Output a single mapping symbol. */
8566
8567 static bool
8568 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
8569 enum map_symbol_type type, bfd_vma offset)
8570 {
8571 static const char *names[2] = { "$x", "$d" };
8572 Elf_Internal_Sym sym;
8573
8574 sym.st_value = (osi->sec->output_section->vma
8575 + osi->sec->output_offset + offset);
8576 sym.st_size = 0;
8577 sym.st_other = 0;
8578 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
8579 sym.st_shndx = osi->sec_shndx;
8580 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
8581 }
8582
8583 /* Output a single local symbol for a generated stub. */
8584
8585 static bool
8586 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
8587 bfd_vma offset, bfd_vma size)
8588 {
8589 Elf_Internal_Sym sym;
8590
8591 sym.st_value = (osi->sec->output_section->vma
8592 + osi->sec->output_offset + offset);
8593 sym.st_size = size;
8594 sym.st_other = 0;
8595 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
8596 sym.st_shndx = osi->sec_shndx;
8597 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
8598 }
8599
8600 static bool
8601 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
8602 {
8603 struct elf_aarch64_stub_hash_entry *stub_entry;
8604 asection *stub_sec;
8605 bfd_vma addr;
8606 char *stub_name;
8607 output_arch_syminfo *osi;
8608
8609 /* Massage our args to the form they really have. */
8610 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
8611 osi = (output_arch_syminfo *) in_arg;
8612
8613 stub_sec = stub_entry->stub_sec;
8614
8615 /* Ensure this stub is attached to the current section being
8616 processed. */
8617 if (stub_sec != osi->sec)
8618 return true;
8619
8620 addr = (bfd_vma) stub_entry->stub_offset;
8621
8622 stub_name = stub_entry->output_name;
8623
8624 switch (stub_entry->stub_type)
8625 {
8626 case aarch64_stub_adrp_branch:
8627 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8628 sizeof (aarch64_adrp_branch_stub)))
8629 return false;
8630 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8631 return false;
8632 break;
8633 case aarch64_stub_long_branch:
8634 if (!elfNN_aarch64_output_stub_sym
8635 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
8636 return false;
8637 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8638 return false;
8639 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
8640 return false;
8641 break;
8642 case aarch64_stub_bti_direct_branch:
8643 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8644 sizeof (aarch64_bti_direct_branch_stub)))
8645 return false;
8646 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8647 return false;
8648 break;
8649 case aarch64_stub_erratum_835769_veneer:
8650 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8651 sizeof (aarch64_erratum_835769_stub)))
8652 return false;
8653 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8654 return false;
8655 break;
8656 case aarch64_stub_erratum_843419_veneer:
8657 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8658 sizeof (aarch64_erratum_843419_stub)))
8659 return false;
8660 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8661 return false;
8662 break;
8663 case aarch64_stub_none:
8664 break;
8665
8666 default:
8667 abort ();
8668 }
8669
8670 return true;
8671 }
8672
8673 /* Output mapping symbols for linker generated sections. */
8674
8675 static bool
8676 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
8677 struct bfd_link_info *info,
8678 void *finfo,
8679 int (*func) (void *, const char *,
8680 Elf_Internal_Sym *,
8681 asection *,
8682 struct elf_link_hash_entry
8683 *))
8684 {
8685 output_arch_syminfo osi;
8686 struct elf_aarch64_link_hash_table *htab;
8687
8688 if (info->strip == strip_all
8689 && !info->emitrelocations
8690 && !bfd_link_relocatable (info))
8691 return true;
8692
8693 htab = elf_aarch64_hash_table (info);
8694
8695 osi.finfo = finfo;
8696 osi.info = info;
8697 osi.func = func;
8698
8699 /* Long call stubs. */
8700 if (htab->stub_bfd && htab->stub_bfd->sections)
8701 {
8702 asection *stub_sec;
8703
8704 for (stub_sec = htab->stub_bfd->sections;
8705 stub_sec != NULL; stub_sec = stub_sec->next)
8706 {
8707 /* Ignore non-stub sections. */
8708 if (!strstr (stub_sec->name, STUB_SUFFIX))
8709 continue;
8710
8711 osi.sec = stub_sec;
8712
8713 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8714 (output_bfd, osi.sec->output_section);
8715
8716 /* A stub always starts with code, so mark offset 0 as an instruction. */
8717 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
8718 return false;
8719
8720 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
8721 &osi);
8722 }
8723 }
8724
8725 /* Finally, output mapping symbols for the PLT. */
8726 if (!htab->root.splt || htab->root.splt->size == 0)
8727 return true;
8728
8729 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8730 (output_bfd, htab->root.splt->output_section);
8731 osi.sec = htab->root.splt;
8732
8733 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
8734
8735 return true;
8736
8737 }
8738
8739 /* Allocate target specific section data. */
8740
8741 static bool
8742 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
8743 {
8744 if (!sec->used_by_bfd)
8745 {
8746 _aarch64_elf_section_data *sdata;
8747 size_t amt = sizeof (*sdata);
8748
8749 sdata = bfd_zalloc (abfd, amt);
8750 if (sdata == NULL)
8751 return false;
8752 sec->used_by_bfd = sdata;
8753 }
8754
8755 record_section_with_aarch64_elf_section_data (sec);
8756
8757 return _bfd_elf_new_section_hook (abfd, sec);
8758 }
8759
8760
8761 static void
8762 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
8763 asection *sec,
8764 void *ignore ATTRIBUTE_UNUSED)
8765 {
8766 unrecord_section_with_aarch64_elf_section_data (sec);
8767 }
8768
8769 static bool
8770 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
8771 {
8772 if (abfd->sections)
8773 bfd_map_over_sections (abfd,
8774 unrecord_section_via_map_over_sections, NULL);
8775
8776 return _bfd_elf_free_cached_info (abfd);
8777 }
8778
8779 /* Create dynamic sections. This is different from the ARM backend in that
8780 the got, plt, gotplt and their relocation sections are all created in the
8781 standard part of the bfd elf backend. */
8782
8783 static bool
8784 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
8785 struct bfd_link_info *info)
8786 {
8787 /* We need to create .got section. */
8788 if (!aarch64_elf_create_got_section (dynobj, info))
8789 return false;
8790
8791 return _bfd_elf_create_dynamic_sections (dynobj, info);
8792 }
8793
8794
8795 /* Allocate space in .plt, .got and associated reloc sections for
8796 dynamic relocs. */
8797
8798 static bool
8799 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8800 {
8801 struct bfd_link_info *info;
8802 struct elf_aarch64_link_hash_table *htab;
8803 struct elf_aarch64_link_hash_entry *eh;
8804 struct elf_dyn_relocs *p;
8805
8806 /* An example of a bfd_link_hash_indirect symbol is a versioned
8807 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8808 -> __gxx_personality_v0(bfd_link_hash_defined)
8809
8810 There is no need to process bfd_link_hash_indirect symbols here
8811 because we will also be presented with the concrete instance of
8812 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8813 called to copy all relevant data from the generic to the concrete
8814 symbol instance. */
8815 if (h->root.type == bfd_link_hash_indirect)
8816 return true;
8817
8818 if (h->root.type == bfd_link_hash_warning)
8819 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8820
8821 info = (struct bfd_link_info *) inf;
8822 htab = elf_aarch64_hash_table (info);
8823
8824 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
8825 here if it is defined and referenced in a non-shared object. */
8826 if (h->type == STT_GNU_IFUNC
8827 && h->def_regular)
8828 return true;
8829 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8830 {
8831 /* Make sure this symbol is output as a dynamic symbol.
8832 Undefined weak syms won't yet be marked as dynamic. */
8833 if (h->dynindx == -1 && !h->forced_local
8834 && h->root.type == bfd_link_hash_undefweak)
8835 {
8836 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8837 return false;
8838 }
8839
8840 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8841 {
8842 asection *s = htab->root.splt;
8843
8844 /* If this is the first .plt entry, make room for the special
8845 first entry. */
8846 if (s->size == 0)
8847 s->size += htab->plt_header_size;
8848
8849 h->plt.offset = s->size;
8850
8851 /* If this symbol is not defined in a regular file, and we are
8852 not generating a shared library, then set the symbol to this
8853 location in the .plt. This is required to make function
8854 pointers compare as equal between the normal executable and
8855 the shared library. */
8856 if (!bfd_link_pic (info) && !h->def_regular)
8857 {
8858 h->root.u.def.section = s;
8859 h->root.u.def.value = h->plt.offset;
8860 }
8861
8862 /* Make room for this entry. For now we only create the
8863 small model PLT entries. We later need to find a way
8864 of relaxing into these from the large model PLT entries. */
8865 s->size += htab->plt_entry_size;
8866
8867 /* We also need to make an entry in the .got.plt section, which
8868 will be placed in the .got section by the linker script. */
8869 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8870
8871 /* We also need to make an entry in the .rela.plt section. */
8872 htab->root.srelplt->size += RELOC_SIZE (htab);
8873
8874 /* We need to ensure that all GOT entries that serve the PLT
8875 are consecutive with the special GOT slots [0] [1] and
8876 [2]. Any additional relocations, such as
8877 R_AARCH64_TLSDESC, must be placed after the PLT related
8878 entries. We abuse the reloc_count such that during
8879 sizing we adjust reloc_count to indicate the number of
8880 PLT related reserved entries. In subsequent phases when
8881 filling in the contents of the reloc entries, PLT related
8882 entries are placed by computing their PLT index (0
8883 .. reloc_count). Other non-PLT relocs are placed
8884 at the slot indicated by reloc_count, and reloc_count is
8885 then updated. */
8886
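/* A rough sketch of the resulting .got.plt layout (assuming, say, two
   PLT entries and one TLSDESC GD symbol):
     [0] [1] [2]   reserved slots for the dynamic linker
     [3] [4]       PLT jump slots      (reloc_count == 2)
     [5] [6]       TLSDESC GOT pair    (placed after the jump table)
   The matching JUMP_SLOT relocs occupy the first reloc_count slots of
   .rela.plt; the TLSDESC reloc is emitted at the slot reloc_count
   indicates when the contents are filled in.  */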
8887 htab->root.srelplt->reloc_count++;
8888
8889 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
8890 variant PCS symbols are present. */
8891 if (h->other & STO_AARCH64_VARIANT_PCS)
8892 htab->variant_pcs = 1;
8893
8894 }
8895 else
8896 {
8897 h->plt.offset = (bfd_vma) - 1;
8898 h->needs_plt = 0;
8899 }
8900 }
8901 else
8902 {
8903 h->plt.offset = (bfd_vma) - 1;
8904 h->needs_plt = 0;
8905 }
8906
8907 eh = (struct elf_aarch64_link_hash_entry *) h;
8908 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8909
8910 if (h->got.refcount > 0)
8911 {
8912 bool dyn;
8913 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8914
8915 h->got.offset = (bfd_vma) - 1;
8916
8917 dyn = htab->root.dynamic_sections_created;
8918
8919 /* Make sure this symbol is output as a dynamic symbol.
8920 Undefined weak syms won't yet be marked as dynamic. */
8921 if (dyn && h->dynindx == -1 && !h->forced_local
8922 && h->root.type == bfd_link_hash_undefweak)
8923 {
8924 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8925 return false;
8926 }
8927
8928 if (got_type == GOT_UNKNOWN)
8929 {
8930 }
8931 else if (got_type == GOT_NORMAL)
8932 {
8933 h->got.offset = htab->root.sgot->size;
8934 htab->root.sgot->size += GOT_ENTRY_SIZE;
8935 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8936 || h->root.type != bfd_link_hash_undefweak)
8937 && (bfd_link_pic (info)
8938 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
8939 /* Undefined weak symbol in static PIE resolves to 0 without
8940 any dynamic relocations. */
8941 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8942 {
8943 htab->root.srelgot->size += RELOC_SIZE (htab);
8944 }
8945 }
8946 else
8947 {
8948 int indx;
8949 if (got_type & GOT_TLSDESC_GD)
8950 {
8951 eh->tlsdesc_got_jump_table_offset =
8952 (htab->root.sgotplt->size
8953 - aarch64_compute_jump_table_size (htab));
8954 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8955 h->got.offset = (bfd_vma) - 2;
8956 }
8957
8958 if (got_type & GOT_TLS_GD)
8959 {
8960 h->got.offset = htab->root.sgot->size;
8961 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8962 }
8963
8964 if (got_type & GOT_TLS_IE)
8965 {
8966 h->got.offset = htab->root.sgot->size;
8967 htab->root.sgot->size += GOT_ENTRY_SIZE;
8968 }
8969
8970 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8971 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8972 || h->root.type != bfd_link_hash_undefweak)
8973 && (!bfd_link_executable (info)
8974 || indx != 0
8975 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8976 {
8977 if (got_type & GOT_TLSDESC_GD)
8978 {
8979 htab->root.srelplt->size += RELOC_SIZE (htab);
8980 /* Note reloc_count not incremented here! We have
8981 already adjusted reloc_count for this relocation
8982 type. */
8983
8984 /* TLSDESC PLT is now needed, but its offset is not yet determined. */
8985 htab->root.tlsdesc_plt = (bfd_vma) - 1;
8986 }
8987
8988 if (got_type & GOT_TLS_GD)
8989 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8990
8991 if (got_type & GOT_TLS_IE)
8992 htab->root.srelgot->size += RELOC_SIZE (htab);
8993 }
8994 }
8995 }
8996 else
8997 {
8998 h->got.offset = (bfd_vma) - 1;
8999 }
9000
9001 if (h->dyn_relocs == NULL)
9002 return true;
9003
9004 for (p = h->dyn_relocs; p != NULL; p = p->next)
9005 if (eh->def_protected)
9006 {
9007 /* Disallow copy relocations against protected symbol. */
9008 asection *s = p->sec->output_section;
9009 if (s != NULL && (s->flags & SEC_READONLY) != 0)
9010 {
9011 info->callbacks->einfo
9012 /* xgettext:c-format */
9013 (_ ("%F%P: %pB: copy relocation against non-copyable "
9014 "protected symbol `%s'\n"),
9015 p->sec->owner, h->root.root.string);
9016 return false;
9017 }
9018 }
9019
9020 /* In the shared -Bsymbolic case, discard space allocated for
9021 dynamic pc-relative relocs against symbols which turn out to be
9022 defined in regular objects. For the normal shared case, discard
9023 space for pc-relative relocs that have become local due to symbol
9024 visibility changes. */
9025
9026 if (bfd_link_pic (info))
9027 {
9028 /* Relocs that use pc_count are those that appear on a call
9029 insn, or certain REL relocs that can be generated via assembly.
9030 We want calls to protected symbols to resolve directly to the
9031 function rather than going via the plt. If people want
9032 function pointer comparisons to work as expected then they
9033 should avoid writing weird assembly. */
9034 if (SYMBOL_CALLS_LOCAL (info, h))
9035 {
9036 struct elf_dyn_relocs **pp;
9037
9038 for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
9039 {
9040 p->count -= p->pc_count;
9041 p->pc_count = 0;
9042 if (p->count == 0)
9043 *pp = p->next;
9044 else
9045 pp = &p->next;
9046 }
9047 }
9048
9049 /* Also discard relocs on undefined weak syms with non-default
9050 visibility. */
9051 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
9052 {
9053 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
9054 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9055 h->dyn_relocs = NULL;
9056
9057 /* Make sure undefined weak symbols are output as dynamic
9058 symbols in PIEs. */
9059 else if (h->dynindx == -1
9060 && !h->forced_local
9061 && h->root.type == bfd_link_hash_undefweak
9062 && !bfd_elf_link_record_dynamic_symbol (info, h))
9063 return false;
9064 }
9065
9066 }
9067 else if (ELIMINATE_COPY_RELOCS)
9068 {
9069 /* For the non-shared case, discard space for relocs against
9070 symbols which turn out to need copy relocs or are not
9071 dynamic. */
9072
9073 if (!h->non_got_ref
9074 && ((h->def_dynamic
9075 && !h->def_regular)
9076 || (htab->root.dynamic_sections_created
9077 && (h->root.type == bfd_link_hash_undefweak
9078 || h->root.type == bfd_link_hash_undefined))))
9079 {
9080 /* Make sure this symbol is output as a dynamic symbol.
9081 Undefined weak syms won't yet be marked as dynamic. */
9082 if (h->dynindx == -1
9083 && !h->forced_local
9084 && h->root.type == bfd_link_hash_undefweak
9085 && !bfd_elf_link_record_dynamic_symbol (info, h))
9086 return false;
9087
9088 /* If that succeeded, we know we'll be keeping all the
9089 relocs. */
9090 if (h->dynindx != -1)
9091 goto keep;
9092 }
9093
9094 h->dyn_relocs = NULL;
9095
9096 keep:;
9097 }
9098
9099 /* Finally, allocate space. */
9100 for (p = h->dyn_relocs; p != NULL; p = p->next)
9101 {
9102 asection *sreloc;
9103
9104 sreloc = elf_section_data (p->sec)->sreloc;
9105
9106 BFD_ASSERT (sreloc != NULL);
9107
9108 sreloc->size += p->count * RELOC_SIZE (htab);
9109 }
9110
9111 return true;
9112 }
9113
9114 /* Allocate space in .plt, .got and associated reloc sections for
9115 ifunc dynamic relocs. */
9116
9117 static bool
9118 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
9119 void *inf)
9120 {
9121 struct bfd_link_info *info;
9122 struct elf_aarch64_link_hash_table *htab;
9123
9124 /* An example of a bfd_link_hash_indirect symbol is a versioned
9125 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
9126 -> __gxx_personality_v0(bfd_link_hash_defined)
9127
9128 There is no need to process bfd_link_hash_indirect symbols here
9129 because we will also be presented with the concrete instance of
9130 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
9131 called to copy all relevant data from the generic to the concrete
9132 symbol instance. */
9133 if (h->root.type == bfd_link_hash_indirect)
9134 return true;
9135
9136 if (h->root.type == bfd_link_hash_warning)
9137 h = (struct elf_link_hash_entry *) h->root.u.i.link;
9138
9139 info = (struct bfd_link_info *) inf;
9140 htab = elf_aarch64_hash_table (info);
9141
9142 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
9143 here if it is defined and referenced in a non-shared object. */
9144 if (h->type == STT_GNU_IFUNC
9145 && h->def_regular)
9146 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
9147 &h->dyn_relocs,
9148 htab->plt_entry_size,
9149 htab->plt_header_size,
9150 GOT_ENTRY_SIZE,
9151 false);
9152 return true;
9153 }
9154
9155 /* Allocate space in .plt, .got and associated reloc sections for
9156 local ifunc dynamic relocs. */
9157
9158 static int
9159 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
9160 {
9161 struct elf_link_hash_entry *h
9162 = (struct elf_link_hash_entry *) *slot;
9163
9164 if (h->type != STT_GNU_IFUNC
9165 || !h->def_regular
9166 || !h->ref_regular
9167 || !h->forced_local
9168 || h->root.type != bfd_link_hash_defined)
9169 abort ();
9170
9171 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
9172 }
9173
9174 /* This is the most important function of all. Innocuously named
9175 though! */
9176
9177 static bool
9178 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
9179 struct bfd_link_info *info)
9180 {
9181 struct elf_aarch64_link_hash_table *htab;
9182 bfd *dynobj;
9183 asection *s;
9184 bool relocs;
9185 bfd *ibfd;
9186
9187 htab = elf_aarch64_hash_table (info);
9188 dynobj = htab->root.dynobj;
9189
9190 BFD_ASSERT (dynobj != NULL);
9191
9192 if (htab->root.dynamic_sections_created)
9193 {
9194 if (bfd_link_executable (info) && !info->nointerp)
9195 {
9196 s = bfd_get_linker_section (dynobj, ".interp");
9197 if (s == NULL)
9198 abort ();
9199 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
9200 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
9201 }
9202 }
9203
9204 /* Set up .got offsets for local syms, and space for local dynamic
9205 relocs. */
9206 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9207 {
9208 struct elf_aarch64_local_symbol *locals = NULL;
9209 Elf_Internal_Shdr *symtab_hdr;
9210 asection *srel;
9211 unsigned int i;
9212
9213 if (!is_aarch64_elf (ibfd))
9214 continue;
9215
9216 for (s = ibfd->sections; s != NULL; s = s->next)
9217 {
9218 struct elf_dyn_relocs *p;
9219
9220 for (p = (struct elf_dyn_relocs *)
9221 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
9222 {
9223 if (!bfd_is_abs_section (p->sec)
9224 && bfd_is_abs_section (p->sec->output_section))
9225 {
9226 /* Input section has been discarded, either because
9227 it is a copy of a linkonce section or due to
9228 linker script /DISCARD/, so we'll be discarding
9229 the relocs too. */
9230 }
9231 else if (p->count != 0)
9232 {
9233 srel = elf_section_data (p->sec)->sreloc;
9234 srel->size += p->count * RELOC_SIZE (htab);
9235 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
9236 info->flags |= DF_TEXTREL;
9237 }
9238 }
9239 }
9240
9241 locals = elf_aarch64_locals (ibfd);
9242 if (!locals)
9243 continue;
9244
9245 symtab_hdr = &elf_symtab_hdr (ibfd);
9246 srel = htab->root.srelgot;
9247 for (i = 0; i < symtab_hdr->sh_info; i++)
9248 {
9249 locals[i].got_offset = (bfd_vma) - 1;
9250 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9251 if (locals[i].got_refcount > 0)
9252 {
9253 unsigned got_type = locals[i].got_type;
9254 if (got_type & GOT_TLSDESC_GD)
9255 {
9256 locals[i].tlsdesc_got_jump_table_offset =
9257 (htab->root.sgotplt->size
9258 - aarch64_compute_jump_table_size (htab));
9259 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9260 locals[i].got_offset = (bfd_vma) - 2;
9261 }
9262
9263 if (got_type & GOT_TLS_GD)
9264 {
9265 locals[i].got_offset = htab->root.sgot->size;
9266 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9267 }
9268
9269 if (got_type & GOT_TLS_IE
9270 || got_type & GOT_NORMAL)
9271 {
9272 locals[i].got_offset = htab->root.sgot->size;
9273 htab->root.sgot->size += GOT_ENTRY_SIZE;
9274 }
9275
9276 if (got_type == GOT_UNKNOWN)
9277 {
9278 }
9279
9280 if (bfd_link_pic (info))
9281 {
9282 if (got_type & GOT_TLSDESC_GD)
9283 {
9284 htab->root.srelplt->size += RELOC_SIZE (htab);
9285 /* Note RELOC_COUNT not incremented here! */
9286 htab->root.tlsdesc_plt = (bfd_vma) - 1;
9287 }
9288
9289 if (got_type & GOT_TLS_GD)
9290 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9291
9292 if (got_type & GOT_TLS_IE
9293 || got_type & GOT_NORMAL)
9294 htab->root.srelgot->size += RELOC_SIZE (htab);
9295 }
9296 }
9297 else
9298 {
9299 locals[i].got_refcount = (bfd_vma) - 1;
9300 }
9301 }
9302 }
9303
9304
9305 /* Allocate global sym .plt and .got entries, and space for global
9306 sym dynamic relocs. */
9307 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9308 info);
9309
9310 /* Allocate global ifunc sym .plt and .got entries, and space for global
9311 ifunc sym dynamic relocs. */
9312 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9313 info);
9314
9315 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9316 htab_traverse (htab->loc_hash_table,
9317 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9318 info);
9319
9320 /* For every jump slot reserved in the sgotplt, reloc_count is
9321 incremented. However, when we reserve space for TLS descriptors,
9322 it's not incremented, so in order to compute the space reserved
9323 for them, it suffices to multiply the reloc count by the jump
9324 slot size. */
9325
9326 if (htab->root.srelplt)
9327 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
9328
9329 if (htab->root.tlsdesc_plt)
9330 {
9331 if (htab->root.splt->size == 0)
9332 htab->root.splt->size += htab->plt_header_size;
9333
9334 /* If we're not using lazy TLS relocations, don't generate the
9335 GOT and PLT entries required for the lazy TLSDESC resolver. */
9336 if ((info->flags & DF_BIND_NOW))
9337 htab->root.tlsdesc_plt = 0;
9338 else
9339 {
9340 htab->root.tlsdesc_plt = htab->root.splt->size;
9341 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9342
9343 htab->root.tlsdesc_got = htab->root.sgot->size;
9344 htab->root.sgot->size += GOT_ENTRY_SIZE;
9345 }
9346 }
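/* When the lazy resolver is generated (no DF_BIND_NOW), the offsets
   computed above, htab->root.tlsdesc_plt within .plt and
   htab->root.tlsdesc_got within .got, are later exposed to the dynamic
   linker by elfNN_aarch64_finish_dynamic_sections through the
   DT_TLSDESC_PLT and DT_TLSDESC_GOT dynamic tags.  */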
9347
9348 /* Init mapping symbol information to use later to distinguish between
9349 code and data while scanning for errata. */
9350 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9351 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9352 {
9353 if (!is_aarch64_elf (ibfd))
9354 continue;
9355 bfd_elfNN_aarch64_init_maps (ibfd);
9356 }
9357
9358 /* We now have determined the sizes of the various dynamic sections.
9359 Allocate memory for them. */
9360 relocs = false;
9361 for (s = dynobj->sections; s != NULL; s = s->next)
9362 {
9363 if ((s->flags & SEC_LINKER_CREATED) == 0)
9364 continue;
9365
9366 if (s == htab->root.splt
9367 || s == htab->root.sgot
9368 || s == htab->root.sgotplt
9369 || s == htab->root.iplt
9370 || s == htab->root.igotplt
9371 || s == htab->root.sdynbss
9372 || s == htab->root.sdynrelro)
9373 {
9374 /* Strip this section if we don't need it; see the
9375 comment below. */
9376 }
9377 else if (startswith (bfd_section_name (s), ".rela"))
9378 {
9379 if (s->size != 0 && s != htab->root.srelplt)
9380 relocs = true;
9381
9382 /* We use the reloc_count field as a counter if we need
9383 to copy relocs into the output file. */
9384 if (s != htab->root.srelplt)
9385 s->reloc_count = 0;
9386 }
9387 else
9388 {
9389 /* It's not one of our sections, so don't allocate space. */
9390 continue;
9391 }
9392
9393 if (s->size == 0)
9394 {
9395 /* If we don't need this section, strip it from the
9396 output file. This is mostly to handle .rela.bss and
9397 .rela.plt. We must create both sections in
9398 create_dynamic_sections, because they must be created
9399 before the linker maps input sections to output
9400 sections. The linker does that before
9401 adjust_dynamic_symbol is called, and it is that
9402 function which decides whether anything needs to go
9403 into these sections. */
9404 s->flags |= SEC_EXCLUDE;
9405 continue;
9406 }
9407
9408 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9409 continue;
9410
9411 /* Allocate memory for the section contents. We use bfd_zalloc
9412 here in case unused entries are not reclaimed before the
9413 section's contents are written out. This should not happen,
9414 but this way if it does, we get a R_AARCH64_NONE reloc instead
9415 of garbage. */
9416 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9417 if (s->contents == NULL)
9418 return false;
9419 }
9420
9421 if (htab->root.dynamic_sections_created)
9422 {
9423 /* Add some entries to the .dynamic section. We fill in the
9424 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9425 must add the entries now so that we get the correct size for
9426 the .dynamic section. The DT_DEBUG entry is filled in by the
9427 dynamic linker and used by the debugger. */
9428 #define add_dynamic_entry(TAG, VAL) \
9429 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9430
9431 if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
9432 return false;
9433
9434 if (htab->root.splt->size != 0)
9435 {
9436 if (htab->variant_pcs
9437 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9438 return false;
9439
9440 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9441 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9442 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9443 return false;
9444
9445 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9446 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9447 return false;
9448
9449 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9450 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9451 return false;
9452 }
9453 }
9454 #undef add_dynamic_entry
9455
9456 return true;
9457 }
9458
9459 static inline void
9460 elf_aarch64_update_plt_entry (bfd *output_bfd,
9461 bfd_reloc_code_real_type r_type,
9462 bfd_byte *plt_entry, bfd_vma value)
9463 {
9464 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9465
9466 /* FIXME: We should check the return value from this function call. */
9467 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9468 }
9469
9470 static void
9471 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9472 struct elf_aarch64_link_hash_table
9473 *htab, bfd *output_bfd,
9474 struct bfd_link_info *info)
9475 {
9476 bfd_byte *plt_entry;
9477 bfd_vma plt_index;
9478 bfd_vma got_offset;
9479 bfd_vma gotplt_entry_address;
9480 bfd_vma plt_entry_address;
9481 Elf_Internal_Rela rela;
9482 bfd_byte *loc;
9483 asection *plt, *gotplt, *relplt;
9484
9485 /* When building a static executable, use .iplt, .igot.plt and
9486 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9487 if (htab->root.splt != NULL)
9488 {
9489 plt = htab->root.splt;
9490 gotplt = htab->root.sgotplt;
9491 relplt = htab->root.srelplt;
9492 }
9493 else
9494 {
9495 plt = htab->root.iplt;
9496 gotplt = htab->root.igotplt;
9497 relplt = htab->root.irelplt;
9498 }
9499
9500 /* Get the index in the procedure linkage table which
9501 corresponds to this symbol. This is the index of this symbol
9502 in all the symbols for which we are making plt entries. The
9503 first entry in the procedure linkage table is reserved.
9504
9505 Get the offset into the .got table of the entry that
9506 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9507 bytes. The first three are reserved for the dynamic linker.
9508
9509 For static executables, we don't reserve anything. */
9510
9511 if (plt == htab->root.splt)
9512 {
9513 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9514 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9515 }
9516 else
9517 {
9518 plt_index = h->plt.offset / htab->plt_entry_size;
9519 got_offset = plt_index * GOT_ENTRY_SIZE;
9520 }
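/* A worked example under the usual small-model sizes (an illustration,
   assuming a 32-byte PLT header, 16-byte PLT entries and 8-byte GOT
   entries for ELF64): the first user PLT entry has h->plt.offset == 32,
   giving plt_index == 0 and got_offset == (0 + 3) * 8 == 24, i.e. the
   fourth .got.plt slot, just past the three reserved ones.  */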
9521
9522 plt_entry = plt->contents + h->plt.offset;
9523 plt_entry_address = plt->output_section->vma
9524 + plt->output_offset + h->plt.offset;
9525 gotplt_entry_address = gotplt->output_section->vma +
9526 gotplt->output_offset + got_offset;
9527
9528 /* Copy in the boiler-plate for the PLTn entry. */
9529 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
9530
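/* Schematically, the small-model PLTn boiler-plate copied above is:
     adrp x16, PLT_GOT + n * 8
     ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
     add  x16, x16, #:lo12:PLT_GOT + n * 8
     br   x17
   The three updates below patch the adrp/ldr/add immediates with the
   page and lo12 parts of this symbol's .got.plt slot address.  */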
9531 /* The first instruction in a BTI enabled PLT stub is a BTI
9532 instruction, so skip it. */
9533 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9534 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9535 plt_entry = plt_entry + 4;
9536
9537 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9538 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9539 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9540 plt_entry,
9541 PG (gotplt_entry_address) -
9542 PG (plt_entry_address));
9543
9544 /* Fill in the lo12 bits for the load from the pltgot. */
9545 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9546 plt_entry + 4,
9547 PG_OFFSET (gotplt_entry_address));
9548
9549 /* Fill in the lo12 bits for the add from the pltgot entry. */
9550 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9551 plt_entry + 8,
9552 PG_OFFSET (gotplt_entry_address));
9553
9554 /* All the GOTPLT entries are essentially initialized to PLT0. */
9555 bfd_put_NN (output_bfd,
9556 plt->output_section->vma + plt->output_offset,
9557 gotplt->contents + got_offset);
9558
9559 rela.r_offset = gotplt_entry_address;
9560
9561 if (h->dynindx == -1
9562 || ((bfd_link_executable (info)
9563 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9564 && h->def_regular
9565 && h->type == STT_GNU_IFUNC))
9566 {
9567 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9568 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9569 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9570 rela.r_addend = (h->root.u.def.value
9571 + h->root.u.def.section->output_section->vma
9572 + h->root.u.def.section->output_offset);
9573 }
9574 else
9575 {
9576 /* Fill in the entry in the .rela.plt section. */
9577 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9578 rela.r_addend = 0;
9579 }
9580
9581 /* Compute the relocation entry to use based on the PLT index and do
9582 not adjust reloc_count. The reloc_count has already been adjusted
9583 to account for this entry. */
9584 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9585 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9586 }
9587
9588 /* Size sections even though they're not dynamic. We use it to set up
9589 _TLS_MODULE_BASE_, if needed. */
9590
9591 static bool
9592 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9593 struct bfd_link_info *info)
9594 {
9595 asection *tls_sec;
9596
9597 if (bfd_link_relocatable (info))
9598 return true;
9599
9600 tls_sec = elf_hash_table (info)->tls_sec;
9601
9602 if (tls_sec)
9603 {
9604 struct elf_link_hash_entry *tlsbase;
9605
9606 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9607 "_TLS_MODULE_BASE_", true, true, false);
9608
9609 if (tlsbase)
9610 {
9611 struct bfd_link_hash_entry *h = NULL;
9612 const struct elf_backend_data *bed =
9613 get_elf_backend_data (output_bfd);
9614
9615 if (!(_bfd_generic_link_add_one_symbol
9616 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9617 tls_sec, 0, NULL, false, bed->collect, &h)))
9618 return false;
9619
9620 tlsbase->type = STT_TLS;
9621 tlsbase = (struct elf_link_hash_entry *) h;
9622 tlsbase->def_regular = 1;
9623 tlsbase->other = STV_HIDDEN;
9624 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9625 }
9626 }
9627
9628 return true;
9629 }
9630
9631 /* Finish up dynamic symbol handling. We set the contents of various
9632 dynamic sections here. */
9633
9634 static bool
9635 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9636 struct bfd_link_info *info,
9637 struct elf_link_hash_entry *h,
9638 Elf_Internal_Sym *sym)
9639 {
9640 struct elf_aarch64_link_hash_table *htab;
9641 htab = elf_aarch64_hash_table (info);
9642
9643 if (h->plt.offset != (bfd_vma) - 1)
9644 {
9645 asection *plt, *gotplt, *relplt;
9646
9647 /* This symbol has an entry in the procedure linkage table. Set
9648 it up. */
9649
9650 /* When building a static executable, use .iplt, .igot.plt and
9651 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9652 if (htab->root.splt != NULL)
9653 {
9654 plt = htab->root.splt;
9655 gotplt = htab->root.sgotplt;
9656 relplt = htab->root.srelplt;
9657 }
9658 else
9659 {
9660 plt = htab->root.iplt;
9661 gotplt = htab->root.igotplt;
9662 relplt = htab->root.irelplt;
9663 }
9664
9665 /* This symbol has an entry in the procedure linkage table. Set
9666 it up. */
9667 if ((h->dynindx == -1
9668 && !((h->forced_local || bfd_link_executable (info))
9669 && h->def_regular
9670 && h->type == STT_GNU_IFUNC))
9671 || plt == NULL
9672 || gotplt == NULL
9673 || relplt == NULL)
9674 return false;
9675
9676 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9677 if (!h->def_regular)
9678 {
9679 /* Mark the symbol as undefined, rather than as defined in
9680 the .plt section. */
9681 sym->st_shndx = SHN_UNDEF;
9682 /* If the symbol is weak we need to clear the value.
9683 Otherwise, the PLT entry would provide a definition for
9684 the symbol even if the symbol wasn't defined anywhere,
9685 and so the symbol would never be NULL. Leave the value if
9686 there were any relocations where pointer equality matters
9687 (this is a clue for the dynamic linker, to make function
9688 pointer comparisons work between an application and shared
9689 library). */
9690 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9691 sym->st_value = 0;
9692 }
9693 }
9694
9695 if (h->got.offset != (bfd_vma) - 1
9696 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9697 /* Undefined weak symbol in static PIE resolves to 0 without
9698 any dynamic relocations. */
9699 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9700 {
9701 Elf_Internal_Rela rela;
9702 bfd_byte *loc;
9703
9704 /* This symbol has an entry in the global offset table. Set it
9705 up. */
9706 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9707 abort ();
9708
9709 rela.r_offset = (htab->root.sgot->output_section->vma
9710 + htab->root.sgot->output_offset
9711 + (h->got.offset & ~(bfd_vma) 1));
9712
9713 if (h->def_regular
9714 && h->type == STT_GNU_IFUNC)
9715 {
9716 if (bfd_link_pic (info))
9717 {
9718 /* Generate R_AARCH64_GLOB_DAT. */
9719 goto do_glob_dat;
9720 }
9721 else
9722 {
9723 asection *plt;
9724
9725 if (!h->pointer_equality_needed)
9726 abort ();
9727
9728 /* For a non-shared object, we can't use .got.plt, which
9729 contains the real function address if we need pointer
9730 equality. We fill the GOT entry with the address of the PLT entry. */
9731 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9732 bfd_put_NN (output_bfd, (plt->output_section->vma
9733 + plt->output_offset
9734 + h->plt.offset),
9735 htab->root.sgot->contents
9736 + (h->got.offset & ~(bfd_vma) 1));
9737 return true;
9738 }
9739 }
9740 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9741 {
9742 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9743 return false;
9744
9745 BFD_ASSERT ((h->got.offset & 1) != 0);
9746 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9747 rela.r_addend = (h->root.u.def.value
9748 + h->root.u.def.section->output_section->vma
9749 + h->root.u.def.section->output_offset);
9750 }
9751 else
9752 {
9753 do_glob_dat:
9754 BFD_ASSERT ((h->got.offset & 1) == 0);
9755 bfd_put_NN (output_bfd, (bfd_vma) 0,
9756 htab->root.sgot->contents + h->got.offset);
9757 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9758 rela.r_addend = 0;
9759 }
9760
9761 loc = htab->root.srelgot->contents;
9762 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9763 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9764 }
9765
9766 if (h->needs_copy)
9767 {
9768 Elf_Internal_Rela rela;
9769 asection *s;
9770 bfd_byte *loc;
9771
9772 /* This symbol needs a copy reloc. Set it up. */
9773 if (h->dynindx == -1
9774 || (h->root.type != bfd_link_hash_defined
9775 && h->root.type != bfd_link_hash_defweak)
9776 || htab->root.srelbss == NULL)
9777 abort ();
9778
9779 rela.r_offset = (h->root.u.def.value
9780 + h->root.u.def.section->output_section->vma
9781 + h->root.u.def.section->output_offset);
9782 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9783 rela.r_addend = 0;
9784 if (h->root.u.def.section == htab->root.sdynrelro)
9785 s = htab->root.sreldynrelro;
9786 else
9787 s = htab->root.srelbss;
9788 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9789 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9790 }
9791
9792 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9793 be NULL for local symbols. */
9794 if (sym != NULL
9795 && (h == elf_hash_table (info)->hdynamic
9796 || h == elf_hash_table (info)->hgot))
9797 sym->st_shndx = SHN_ABS;
9798
9799 return true;
9800 }
9801
9802 /* Finish up local dynamic symbol handling. We set the contents of
9803 various dynamic sections here. */
9804
9805 static int
9806 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9807 {
9808 struct elf_link_hash_entry *h
9809 = (struct elf_link_hash_entry *) *slot;
9810 struct bfd_link_info *info
9811 = (struct bfd_link_info *) inf;
9812
9813 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9814 info, h, NULL);
9815 }
9816
9817 static void
9818 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9819 struct elf_aarch64_link_hash_table
9820 *htab)
9821 {
9822 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
9823 small and large PLTs and at the moment just generates
9824 the small PLT. */
9825
9826 /* PLT0 of the small PLT looks like this in ELF64 -
9827 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9828 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9829 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9830 // symbol resolver
9831 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9832 // GOTPLT entry for this.
9833 br x17
9834 PLT0 will be slightly different in ELF32 due to different got entry
9835 size. */
9836 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9837 bfd_vma plt_base;
9838
9839
9840 memcpy (htab->root.splt->contents, htab->plt0_entry,
9841 htab->plt_header_size);
9842
9843 /* PR 26312: Explicitly set the sh_entsize to 0 so that
9844 consumers do not think that the section contains fixed
9845 sized objects. */
9846 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
9847
9848 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9849 + htab->root.sgotplt->output_offset
9850 + GOT_ENTRY_SIZE * 2);
9851
9852 plt_base = htab->root.splt->output_section->vma +
9853 htab->root.splt->output_offset;
9854
9855 /* The first instruction in a BTI enabled PLT stub is a BTI
9856 instruction, so skip it. */
9857 bfd_byte *plt0_entry = htab->root.splt->contents;
9858 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9859 plt0_entry = plt0_entry + 4;
9860
9861 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9862 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9863 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9864 plt0_entry + 4,
9865 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9866
9867 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9868 plt0_entry + 8,
9869 PG_OFFSET (plt_got_2nd_ent));
9870
9871 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9872 plt0_entry + 12,
9873 PG_OFFSET (plt_got_2nd_ent));
9874 }
9875
9876 static bool
9877 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9878 struct bfd_link_info *info)
9879 {
9880 struct elf_aarch64_link_hash_table *htab;
9881 bfd *dynobj;
9882 asection *sdyn;
9883
9884 htab = elf_aarch64_hash_table (info);
9885 dynobj = htab->root.dynobj;
9886 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9887
9888 if (htab->root.dynamic_sections_created)
9889 {
9890 ElfNN_External_Dyn *dyncon, *dynconend;
9891
9892 if (sdyn == NULL || htab->root.sgot == NULL)
9893 abort ();
9894
9895 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9896 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9897 for (; dyncon < dynconend; dyncon++)
9898 {
9899 Elf_Internal_Dyn dyn;
9900 asection *s;
9901
9902 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9903
9904 switch (dyn.d_tag)
9905 {
9906 default:
9907 continue;
9908
9909 case DT_PLTGOT:
9910 s = htab->root.sgotplt;
9911 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9912 break;
9913
9914 case DT_JMPREL:
9915 s = htab->root.srelplt;
9916 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9917 break;
9918
9919 case DT_PLTRELSZ:
9920 s = htab->root.srelplt;
9921 dyn.d_un.d_val = s->size;
9922 break;
9923
9924 case DT_TLSDESC_PLT:
9925 s = htab->root.splt;
9926 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9927 + htab->root.tlsdesc_plt;
9928 break;
9929
9930 case DT_TLSDESC_GOT:
9931 s = htab->root.sgot;
9932 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9933 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9934 + htab->root.tlsdesc_got;
9935 break;
9936 }
9937
9938 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9939 }
9940
9941 }
9942
9943 /* Fill in the special first entry in the procedure linkage table. */
9944 if (htab->root.splt && htab->root.splt->size > 0)
9945 {
9946 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9947
9948 if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9949 {
9950 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9951 bfd_put_NN (output_bfd, (bfd_vma) 0,
9952 htab->root.sgot->contents + htab->root.tlsdesc_got);
9953
9954 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9955 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9956
9957 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9958 if (type == PLT_BTI || type == PLT_BTI_PAC)
9959 {
9960 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9961 }
9962
9963 memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
9964 entry, htab->tlsdesc_plt_entry_size);
9965
9966 {
9967 bfd_vma adrp1_addr =
9968 htab->root.splt->output_section->vma
9969 + htab->root.splt->output_offset
9970 + htab->root.tlsdesc_plt + 4;
9971
9972 bfd_vma adrp2_addr = adrp1_addr + 4;
9973
9974 bfd_vma got_addr =
9975 htab->root.sgot->output_section->vma
9976 + htab->root.sgot->output_offset;
9977
9978 bfd_vma pltgot_addr =
9979 htab->root.sgotplt->output_section->vma
9980 + htab->root.sgotplt->output_offset;
9981
9982 bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
9983
9984 bfd_byte *plt_entry =
9985 htab->root.splt->contents + htab->root.tlsdesc_plt;
9986
9987 /* The first instruction in a BTI enabled PLT stub is a BTI
9988 instruction, so skip it. */
9989 if (type & PLT_BTI)
9990 {
9991 plt_entry = plt_entry + 4;
9992 adrp1_addr = adrp1_addr + 4;
9993 adrp2_addr = adrp2_addr + 4;
9994 }
9995
9996 /* adrp x2, DT_TLSDESC_GOT */
9997 elf_aarch64_update_plt_entry (output_bfd,
9998 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9999 plt_entry + 4,
10000 (PG (dt_tlsdesc_got)
10001 - PG (adrp1_addr)));
10002
10003 /* adrp x3, 0 */
10004 elf_aarch64_update_plt_entry (output_bfd,
10005 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
10006 plt_entry + 8,
10007 (PG (pltgot_addr)
10008 - PG (adrp2_addr)));
10009
10010 /* ldr x2, [x2, #0] */
10011 elf_aarch64_update_plt_entry (output_bfd,
10012 BFD_RELOC_AARCH64_LDSTNN_LO12,
10013 plt_entry + 12,
10014 PG_OFFSET (dt_tlsdesc_got));
10015
10016 /* add x3, x3, 0 */
10017 elf_aarch64_update_plt_entry (output_bfd,
10018 BFD_RELOC_AARCH64_ADD_LO12,
10019 plt_entry + 16,
10020 PG_OFFSET (pltgot_addr));
10021 }
10022 }
10023 }
10024
10025 if (htab->root.sgotplt)
10026 {
10027 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
10028 {
10029 _bfd_error_handler
10030 (_("discarded output section: `%pA'"), htab->root.sgotplt);
10031 return false;
10032 }
10033
10034 /* Fill in the first three entries in the global offset table. */
10035 if (htab->root.sgotplt->size > 0)
10036 {
10037 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
10038
10039 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
10040 bfd_put_NN (output_bfd,
10041 (bfd_vma) 0,
10042 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
10043 bfd_put_NN (output_bfd,
10044 (bfd_vma) 0,
10045 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
10046 }
10047
10048 if (htab->root.sgot)
10049 {
10050 if (htab->root.sgot->size > 0)
10051 {
10052 bfd_vma addr =
10053 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
10054 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
10055 }
10056 }
10057
10058 elf_section_data (htab->root.sgotplt->output_section)->
10059 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
10060 }
10061
10062 if (htab->root.sgot && htab->root.sgot->size > 0)
10063 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
10064 = GOT_ENTRY_SIZE;
10065
10066 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
10067 htab_traverse (htab->loc_hash_table,
10068 elfNN_aarch64_finish_local_dynamic_symbol,
10069 info);
10070
10071 return true;
10072 }
10073
10074 /* Check whether BTI or PAC enabled PLTs are needed. Returns the PLT type needed. */
10075 static aarch64_plt_type
10076 get_plt_type (bfd *abfd)
10077 {
10078 aarch64_plt_type ret = PLT_NORMAL;
10079 bfd_byte *contents, *extdyn, *extdynend;
10080 asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
10081 if (!sec
10082 || (sec->flags & SEC_HAS_CONTENTS) == 0
10083 || sec->size < sizeof (ElfNN_External_Dyn)
10084 || !bfd_malloc_and_get_section (abfd, sec, &contents))
10085 return ret;
10086 extdyn = contents;
10087 extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
10088 for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
10089 {
10090 Elf_Internal_Dyn dyn;
10091 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
10092
10093 /* Let's check the processor specific dynamic array tags. */
10094 bfd_vma tag = dyn.d_tag;
10095 if (tag < DT_LOPROC || tag > DT_HIPROC)
10096 continue;
10097
10098 switch (tag)
10099 {
10100 case DT_AARCH64_BTI_PLT:
10101 ret |= PLT_BTI;
10102 break;
10103
10104 case DT_AARCH64_PAC_PLT:
10105 ret |= PLT_PAC;
10106 break;
10107
10108 default: break;
10109 }
10110 }
10111 free (contents);
10112 return ret;
10113 }
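/* For example, an input linked with "-z force-bti" carries a
   DT_AARCH64_BTI_PLT tag in its .dynamic section, so get_plt_type
   returns PLT_BTI for it; if "-z pac-plt" was also used, the
   DT_AARCH64_PAC_PLT tag is present as well and the result is
   PLT_BTI_PAC (PLT_BTI | PLT_PAC).  */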
10114
10115 static long
10116 elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
10117 long symcount,
10118 asymbol **syms,
10119 long dynsymcount,
10120 asymbol **dynsyms,
10121 asymbol **ret)
10122 {
10123 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
10124 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
10125 dynsymcount, dynsyms, ret);
10126 }
10127
10128 /* Return the address of the Ith PLT stub in section PLT, for relocation REL,
10129 or (bfd_vma) -1 if it should not be included. */
10130
10131 static bfd_vma
10132 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
10133 const arelent *rel ATTRIBUTE_UNUSED)
10134 {
10135 size_t plt0_size = PLT_ENTRY_SIZE;
10136 size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
10137
10138 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
10139 {
10140 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10141 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
10142 else
10143 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10144 }
10145 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
10146 {
10147 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10148 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
10149 }
10150 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
10151 {
10152 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10153 }
10154
10155 return plt->vma + plt0_size + i * pltn_size;
10156 }
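/* E.g. with a plain small-model PLT (PLT_NORMAL) the Ith stub lives at
   plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE, i.e. just past
   the header and i standard-sized entries; the BTI/PAC variants above
   only change the per-entry stride.  */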
10157
10158 /* Returns TRUE if NAME is an AArch64 mapping symbol.
10159 The ARM ELF standard defines $x (for A64 code) and $d (for data).
10160 It also allows a period-initiated suffix to be added to the symbol, i.e.:
10161 "$[adtx]\.[:sym_char]+". */
10162
10163 static bool
10164 is_aarch64_mapping_symbol (const char * name)
10165 {
10166 return name != NULL /* Paranoia. */
10167 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
10168 the mapping symbols could have acquired a prefix.
10169 We do not support this here, since such symbols no
10170 longer conform to the ARM ELF ABI. */
10171 && (name[1] == 'd' || name[1] == 'x')
10172 && (name[2] == 0 || name[2] == '.');
10173 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
10174 any characters that follow the period are legal characters for the body
10175 of a symbol's name. For now we just assume that this is the case. */
10176 }
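/* So, for instance, "$x", "$d" and "$x.42" are treated as mapping
   symbols here, while "$t" (a 32-bit Arm/Thumb marker) and prefixed
   names such as "foo$x" are not.  */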
10177
10178 /* Make sure that mapping symbols in object files are not removed via the
10179 "strip --strip-unneeded" tool. These symbols might needed in order to
10180 correctly generate linked files. Once an object file has been linked,
10181 it should be safe to remove them. */
10182
10183 static void
10184 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
10185 {
10186 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
10187 && sym->section != bfd_abs_section_ptr
10188 && is_aarch64_mapping_symbol (sym->name))
10189 sym->flags |= BSF_KEEP;
10190 }
10191
10192 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
10193 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
10194 for the effect of GNU properties of the output_bfd. */
10195 static bfd *
10196 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
10197 {
10198 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10199 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
10200 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
10201 elf_aarch64_tdata (info->output_bfd)->plt_type
10202 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
10203 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
10204 return pbfd;
10205 }
10206
10207 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
10208 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
10209 for the effect of GNU properties of the output_bfd. */
10210 static bool
10211 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
10212 bfd *abfd, bfd *bbfd,
10213 elf_property *aprop,
10214 elf_property *bprop)
10215 {
10216 uint32_t prop
10217 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10218
10219 /* If output has been marked with BTI using command line argument, give out
10220 warning if necessary. */
10221 /* Properties are merged per type, hence only check for warnings when merging
10222 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
10223 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
10224 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
10225 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
10226 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
10227 {
10228 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10229 || !aprop)
10230 {
10231 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10232 "all inputs do not have BTI in NOTE section."),
10233 abfd);
10234 }
10235 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10236 || !bprop)
10237 {
10238 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10239 "all inputs do not have BTI in NOTE section."),
10240 bbfd);
10241 }
10242 }
10243
10244 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
10245 bprop, prop);
10246 }
10247
10248 /* We use this so we can override certain functions
10249 (though currently we don't). */
10250
10251 const struct elf_size_info elfNN_aarch64_size_info =
10252 {
10253 sizeof (ElfNN_External_Ehdr),
10254 sizeof (ElfNN_External_Phdr),
10255 sizeof (ElfNN_External_Shdr),
10256 sizeof (ElfNN_External_Rel),
10257 sizeof (ElfNN_External_Rela),
10258 sizeof (ElfNN_External_Sym),
10259 sizeof (ElfNN_External_Dyn),
10260 sizeof (Elf_External_Note),
10261 4, /* Hash table entry size. */
10262 1, /* Internal relocs per external relocs. */
10263 ARCH_SIZE, /* Arch size. */
10264 LOG_FILE_ALIGN, /* Log_file_align. */
10265 ELFCLASSNN, EV_CURRENT,
10266 bfd_elfNN_write_out_phdrs,
10267 bfd_elfNN_write_shdrs_and_ehdr,
10268 bfd_elfNN_checksum_contents,
10269 bfd_elfNN_write_relocs,
10270 bfd_elfNN_swap_symbol_in,
10271 bfd_elfNN_swap_symbol_out,
10272 bfd_elfNN_slurp_reloc_table,
10273 bfd_elfNN_slurp_symbol_table,
10274 bfd_elfNN_swap_dyn_in,
10275 bfd_elfNN_swap_dyn_out,
10276 bfd_elfNN_swap_reloc_in,
10277 bfd_elfNN_swap_reloc_out,
10278 bfd_elfNN_swap_reloca_in,
10279 bfd_elfNN_swap_reloca_out
10280 };
10281
10282 #define ELF_ARCH bfd_arch_aarch64
10283 #define ELF_MACHINE_CODE EM_AARCH64
10284 #define ELF_MAXPAGESIZE 0x10000
10285 #define ELF_COMMONPAGESIZE 0x1000
10286
10287 #define bfd_elfNN_bfd_free_cached_info \
10288 elfNN_aarch64_bfd_free_cached_info
10289
10290 #define bfd_elfNN_bfd_is_target_special_symbol \
10291 elfNN_aarch64_is_target_special_symbol
10292
10293 #define bfd_elfNN_bfd_link_hash_table_create \
10294 elfNN_aarch64_link_hash_table_create
10295
10296 #define bfd_elfNN_bfd_merge_private_bfd_data \
10297 elfNN_aarch64_merge_private_bfd_data
10298
10299 #define bfd_elfNN_bfd_print_private_bfd_data \
10300 elfNN_aarch64_print_private_bfd_data
10301
10302 #define bfd_elfNN_bfd_reloc_type_lookup \
10303 elfNN_aarch64_reloc_type_lookup
10304
10305 #define bfd_elfNN_bfd_reloc_name_lookup \
10306 elfNN_aarch64_reloc_name_lookup
10307
10308 #define bfd_elfNN_bfd_set_private_flags \
10309 elfNN_aarch64_set_private_flags
10310
10311 #define bfd_elfNN_find_inliner_info \
10312 elfNN_aarch64_find_inliner_info
10313
10314 #define bfd_elfNN_get_synthetic_symtab \
10315 elfNN_aarch64_get_synthetic_symtab
10316
10317 #define bfd_elfNN_mkobject \
10318 elfNN_aarch64_mkobject
10319
10320 #define bfd_elfNN_new_section_hook \
10321 elfNN_aarch64_new_section_hook
10322
10323 #define elf_backend_adjust_dynamic_symbol \
10324 elfNN_aarch64_adjust_dynamic_symbol
10325
10326 #define elf_backend_always_size_sections \
10327 elfNN_aarch64_always_size_sections
10328
10329 #define elf_backend_check_relocs \
10330 elfNN_aarch64_check_relocs
10331
10332 #define elf_backend_copy_indirect_symbol \
10333 elfNN_aarch64_copy_indirect_symbol
10334
10335 #define elf_backend_merge_symbol_attribute \
10336 elfNN_aarch64_merge_symbol_attribute
10337
10338 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
10339 to them in our hash. */
10340 #define elf_backend_create_dynamic_sections \
10341 elfNN_aarch64_create_dynamic_sections
10342
10343 #define elf_backend_init_index_section \
10344 _bfd_elf_init_2_index_sections
10345
10346 #define elf_backend_finish_dynamic_sections \
10347 elfNN_aarch64_finish_dynamic_sections
10348
10349 #define elf_backend_finish_dynamic_symbol \
10350 elfNN_aarch64_finish_dynamic_symbol
10351
10352 #define elf_backend_object_p \
10353 elfNN_aarch64_object_p
10354
10355 #define elf_backend_output_arch_local_syms \
10356 elfNN_aarch64_output_arch_local_syms
10357
10358 #define elf_backend_maybe_function_sym \
10359 elfNN_aarch64_maybe_function_sym
10360
10361 #define elf_backend_plt_sym_val \
10362 elfNN_aarch64_plt_sym_val
10363
10364 #define elf_backend_init_file_header \
10365 elfNN_aarch64_init_file_header
10366
10367 #define elf_backend_relocate_section \
10368 elfNN_aarch64_relocate_section
10369
10370 #define elf_backend_reloc_type_class \
10371 elfNN_aarch64_reloc_type_class
10372
10373 #define elf_backend_section_from_shdr \
10374 elfNN_aarch64_section_from_shdr
10375
10376 #define elf_backend_section_from_phdr \
10377 elfNN_aarch64_section_from_phdr
10378
10379 #define elf_backend_modify_headers \
10380 elfNN_aarch64_modify_headers
10381
10382 #define elf_backend_size_dynamic_sections \
10383 elfNN_aarch64_size_dynamic_sections
10384
10385 #define elf_backend_size_info \
10386 elfNN_aarch64_size_info
10387
10388 #define elf_backend_write_section \
10389 elfNN_aarch64_write_section
10390
10391 #define elf_backend_symbol_processing \
10392 elfNN_aarch64_backend_symbol_processing
10393
10394 #define elf_backend_setup_gnu_properties \
10395 elfNN_aarch64_link_setup_gnu_properties
10396
10397 #define elf_backend_merge_gnu_properties \
10398 elfNN_aarch64_merge_gnu_properties
10399
10400 #define elf_backend_can_refcount 1
10401 #define elf_backend_can_gc_sections 1
10402 #define elf_backend_plt_readonly 1
10403 #define elf_backend_want_got_plt 1
10404 #define elf_backend_want_plt_sym 0
10405 #define elf_backend_want_dynrelro 1
10406 #define elf_backend_may_use_rel_p 0
10407 #define elf_backend_may_use_rela_p 1
10408 #define elf_backend_default_use_rela_p 1
10409 #define elf_backend_rela_normal 1
10410 #define elf_backend_dtrel_excludes_plt 1
10411 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
10412 #define elf_backend_default_execstack 0
10413 #define elf_backend_extern_protected_data 0
10414 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
10415
10416 #undef elf_backend_obj_attrs_section
10417 #define elf_backend_obj_attrs_section ".ARM.attributes"
10418
10419 #include "elfNN-target.h"
10420
10421 /* CloudABI support. */
10422
10423 #undef TARGET_LITTLE_SYM
10424 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
10425 #undef TARGET_LITTLE_NAME
10426 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
10427 #undef TARGET_BIG_SYM
10428 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
10429 #undef TARGET_BIG_NAME
10430 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
10431
10432 #undef ELF_OSABI
10433 #define ELF_OSABI ELFOSABI_CLOUDABI
10434
10435 #undef elfNN_bed
10436 #define elfNN_bed elfNN_aarch64_cloudabi_bed
10437
10438 #include "elfNN-target.h"