/* glibc.git: sysdeps/powerpc/powerpc64/dl-machine.h  */

/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB. If
   not, see <https://www.gnu.org/licenses/>. */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <cpu-features.c>
#include <dl-static-tls.h>
#include <dl-funcdesc.h>
#include <dl-machine-rel.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array. */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)
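/* For example, with DT_PPC64_GLINK defined as DT_LOPROC + 0 in <elf.h>,
   DT_PPC64 (GLINK) works out to DT_NUM + 0, i.e. the first of the
   processor-specific slots that follow the DT_NUM standard l_info
   entries, and DT_PPC64 (OPT) likewise maps to DT_NUM + 3. */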

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host. */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version. */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}
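
/* EF_PPC64_ABI covers the low two bits of e_flags: 1 selects ELFv1 and 2
   selects ELFv2, while 0 (ABI level left unspecified by the producer) is
   accepted by either loader, which is why the check above is skipped in
   that case. */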

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader. */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader. */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}


/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero. */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2. So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved. */
  asm ("  ld    %0,-32768(2)\n"
       "  subf  %0,%0,2\n"
       : "=r" (ret));
  return ret;
}

/* Return the link-time address of _DYNAMIC. */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address. */
  asm ("  addis %0,2,_DYNAMIC@toc@ha\n"
       "  addi  %0,%0,_DYNAMIC@toc@l\n"
       : "=b" (runtime_dynamic));
  /* Then subtract off the load address offset. */
  return runtime_dynamic - elf_machine_load_address ();
}
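
/* To illustrate with made-up numbers: if _DYNAMIC sits at link-time
   address 0x1f2c0 and the object is loaded at 0x7fff80000000, the asm
   above yields 0x7fff8001f2c0, elf_machine_load_address () returns
   0x7fff80000000, and the subtraction recovers the link-time 0x1f2c0. */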

/* The PLT uses Elf64_Rela relocs. */
#define elf_machine_relplt elf_machine_rela


#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up. */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n" \
" .tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif


/* Initial entry point code for the dynamic linker. The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point. */
#define RTLD_START \
  asm (".pushsection \".text\"\n" \
" .align 2\n" \
" " ENTRY_2(_start) "\n" \
BODY_PREFIX "_start:\n" \
" " LOCALENTRY(_start) "\n" \
/* We start with the following on the stack, from top: \
   argc (4 bytes); \
   arguments for program (terminated by NULL); \
   environment variables (terminated by NULL); \
   arguments for the program loader. */ \
" mr 3,1\n" \
" li 4,0\n" \
" stdu 4,-128(1)\n" \
/* Call _dl_start with one parameter pointing at argc. */ \
" bl " DOT_PREFIX "_dl_start\n" \
" nop\n" \
/* Transfer control to _dl_start_user! */ \
" b " DOT_PREFIX "_dl_start_user\n" \
".LT__start:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__start-" BODY_PREFIX "_start\n" \
" .short .LT__start_name_end-.LT__start_name_start\n" \
".LT__start_name_start:\n" \
" .ascii \"_start\"\n" \
".LT__start_name_end:\n" \
" .align 2\n" \
" " END_2(_start) "\n" \
" .pushsection \".toc\",\"aw\"\n" \
DL_STARTING_UP_DEF \
".LC__rtld_local:\n" \
" .tc _rtld_local[TC],_rtld_local\n" \
".LC__dl_argc:\n" \
" .tc _dl_argc[TC],_dl_argc\n" \
".LC__dl_argv:\n" \
" .tc __GI__dl_argv[TC],__GI__dl_argv\n" \
".LC__dl_fini:\n" \
" .tc _dl_fini[TC],_dl_fini\n" \
" .popsection\n" \
" " ENTRY_2(_dl_start_user) "\n" \
/* Now, we do our main work of calling initialisation procedures. \
   The ELF ABI doesn't say anything about parameters for these, \
   so we just pass argc, argv, and the environment. \
   Changing these is strongly discouraged (not least because argc is \
   passed by value!). */ \
BODY_PREFIX "_dl_start_user:\n" \
" " LOCALENTRY(_dl_start_user) "\n" \
/* the address of _start in r30. */ \
" mr 30,3\n" \
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28. */ \
" addis 28,2,.LC__rtld_local@toc@ha\n" \
" ld 28,.LC__rtld_local@toc@l(28)\n" \
" addis 29,2,.LC__dl_argc@toc@ha\n" \
" ld 29,.LC__dl_argc@toc@l(29)\n" \
" addis 27,2,.LC__dl_argv@toc@ha\n" \
" ld 27,.LC__dl_argv@toc@l(27)\n" \
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1). */ \
" ld 3,0(28)\n" \
" lwa 4,0(29)\n" \
" ld 5,0(27)\n" \
" sldi 6,4,3\n" \
" add 6,5,6\n" \
" addi 6,6,8\n" \
" bl " DOT_PREFIX "_dl_init\n" \
" nop\n" \
/* Now, to conform to the ELF ABI, we have to: \
   Pass argc (actually _dl_argc) in r3; */ \
" lwa 3,0(29)\n" \
/* Pass argv (actually _dl_argv) in r4; */ \
" ld 4,0(27)\n" \
/* Pass argv+argc+1 in r5; */ \
" sldi 5,3,3\n" \
" add 6,4,5\n" \
" addi 5,6,8\n" \
/* Pass the auxiliary vector in r6. This is passed to us just after \
   _envp. */ \
"2: ldu 0,8(6)\n" \
" cmpdi 0,0\n" \
" bne 2b\n" \
" addi 6,6,8\n" \
/* Pass a termination function pointer (in this case _dl_fini) in \
   r7. */ \
" addis 7,2,.LC__dl_fini@toc@ha\n" \
" ld 7,.LC__dl_fini@toc@l(7)\n" \
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL \
   value. This lets our startup code distinguish between a program \
   linked statically, which linux will call with argc on top of the \
   stack which will hopefully never be zero, and a dynamically linked \
   program which will always have a NULL on the top of the stack. \
   Take the opportunity to clear LR, so anyone who accidentally \
   returns from _start gets SEGV. Also clear the next few words of \
   the stack. */ \
" li 31,0\n" \
" std 31,0(1)\n" \
" mtlr 31\n" \
" std 31,8(1)\n" \
" std 31,16(1)\n" \
" std 31,24(1)\n" \
/* Now, call the start function descriptor at r30... */ \
" .globl ._dl_main_dispatch\n" \
"._dl_main_dispatch:\n" \
" " PPC64_LOAD_FUNCPTR(30) "\n" \
" bctr\n" \
".LT__dl_start_user:\n" \
" .long 0\n" \
" .byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n" \
" .long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n" \
" .short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n" \
" .ascii \"_dl_start_user\"\n" \
".LT__dl_start_user_name_end:\n" \
" .align 2\n" \
" " END_2(_dl_start_user) "\n" \
" .popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function. This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided. For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address. Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially. For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address. For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address. This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this. The linker won't emit
   SHN_UNDEF symbols with non-zero values. ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations. So always set ELF_RTYPE_CLASS_PLT. */

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2. ELFv2 does need the special SHN_UNDEF treatment. */
#define IS_PPC64_TLS_RELOC(R) \
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA) \
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT \
     || (type) == R_PPC64_ADDR24 \
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif
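
/* So, for instance, under ELFv2 elf_machine_type_class (R_PPC64_ADDR64)
   is 0, elf_machine_type_class (R_PPC64_JMP_SLOT) is ELF_RTYPE_CLASS_PLT,
   and elf_machine_type_class (R_PPC64_COPY) is ELF_RTYPE_CLASS_COPY,
   while under ELFv1 every type includes ELF_RTYPE_CLASS_PLT. */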

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_PPC64_JMP_SLOT

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later. This is called
   very early in _dl_sysdep_start for dynamically linked binaries. */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
  init_cpu_features (&GLRO(dl_powerpc_cpu_features));
}
#endif

/* Stuff for the PLT. */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions. */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
#endif
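
/* Note the units: the PLT_* counts above are in doublewords (the PLT is
   walked below through an Elf64_Xword pointer), while the GLINK_* counts
   are in 32-bit words, i.e. instructions in the glink stub area. */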

#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is). */
#define MODIFIED_CODE_NOQUEUE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue. */
#define MODIFIED_CODE(where) \
  do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c. */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
                                    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
         elf_get_dynamic_info takes care of the standard entries but
         doesn't know exactly what to do with processor specific
         entries. */
      if (info[DT_PPC64(GLINK)] != NULL)
        info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
        {
          Elf64_Word glink_offset;
          Elf64_Word offset;
          Elf64_Addr dlrr;

          dlrr = (Elf64_Addr) (profile ? _dl_profile_resolve
                               : _dl_runtime_resolve);
          if (profile && GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), map))
            /* This is the object we are looking for. Say that we really
               want profiling and the timers are started. */
            GL(dl_profile_map) = map;

#if _CALL_ELF != 2
          /* We need to stuff the address/TOC of _dl_runtime_resolve
             into doublewords 0 and 1 of plt_reserve. Then we need to
             stuff the map address into doubleword 2 of plt_reserve.
             This allows the GLINK0 code to transfer control to the
             correct trampoline which will transfer control to fixup
             in dl-machine.c. */
          {
            /* The plt_reserve area is the 1st 3 doublewords of the PLT. */
            Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
            Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
            plt_reserve->fd_func = resolve_fd->fd_func;
            plt_reserve->fd_toc = resolve_fd->fd_toc;
            plt_reserve->fd_aux = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
            /* When we're bootstrapping, the opd entry will not have
               been relocated yet. */
            plt_reserve->fd_func += l_addr;
            plt_reserve->fd_toc += l_addr;
#endif
          }
#else
          /* When we don't have function descriptors, the first doubleword
             of the PLT holds the address of _dl_runtime_resolve, and the
             second doubleword holds the map address. */
          plt[0] = dlrr;
          plt[1] = (Elf64_Addr) map;
#endif

          /* Set up the lazy PLT entries. */
          glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
          offset = PLT_INITIAL_ENTRY_WORDS;
          glink_offset = GLINK_INITIAL_ENTRY_WORDS;
          for (i = 0; i < num_plt_entries; i++)
            {
              plt[offset] = (Elf64_Xword) &glink[glink_offset];
              offset += PLT_ENTRY_WORDS;
              glink_offset += GLINK_ENTRY_WORDS (i);
            }

          /* Now, we've modified data. We need to write the changes from
             the data cache to a second-level unified cache, then make
             sure that stale data in the instruction cache is removed.
             (In a multiprocessor system, the effect is more complex.)
             Most of the PLT shouldn't be in the instruction cache, but
             there may be a little overlap at the start and the end.

             Assumes that dcbst and icbi apply to lines of 16 bytes or
             more. Current known line sizes are 16, 32, and 128 bytes. */

          for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
            PPC_DCBST (p);
          PPC_SYNC;
        }
    }
  return lazy;
}
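
/* With the ELFv2 values above this leaves plt[0] pointing at
   _dl_runtime_resolve, plt[1] at the link_map, and plt[2 + i] at
   &glink[8 + i] for PLT entry i; the ELFv1 layout is the same idea with
   a three-doubleword reserved area and three-doubleword entries. */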

#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
                                                   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable. */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
                          const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point. */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
         are not being satisfied by a non-zero localentry symbol. */
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
          && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
          && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
          && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
        _dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point. */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination. */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point. Retrieve its offset
     from the symbol's ELF st_other field. */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif
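
/* PPC64_LOCAL_ENTRY_OFFSET decodes the three STO_PPC64_LOCAL bits of
   st_other; roughly, an encoded value n other than 0 or 1 means the
   local entry point sits 1 << n bytes (4, 8, 16, ...) past the global
   entry point, which is the ELFv2 convention for skipping the
   TOC-pointer setup on same-TOC calls. */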

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine. */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym; set the plt to
     zero. finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends. */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called. Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero. */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it. */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it. */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func. */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

/* Return the final value of a plt relocation. */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  return value + reloc->r_addend;
}


/* Names of the architecture-specific auditing callback functions. */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#if ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld)
#include <libc-diag.h>
#include <tcb-offsets.h>

/* Set up r13 for _dl_relocate_static_pie so that libgcc ifuncs that
   normally access the tcb copy of hwcap will see __tcb.hwcap. */

static inline void __attribute__ ((always_inline))
ppc_init_fake_thread_pointer (void)
{
  DIAG_PUSH_NEEDS_COMMENT;
  /* We are playing pointer tricks. Silence gcc warning. */
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Warray-bounds");
  __thread_register = (char *) &__tcb.hwcap - TCB_HWCAP;
  DIAG_POP_NEEDS_COMMENT;
}

#define ELF_MACHINE_BEFORE_RTLD_RELOC(map, dynamic_info) \
  ppc_init_fake_thread_pointer ();
#endif /* ENABLE_STATIC_PIE && !defined SHARED && !IS_IN (rtld) */

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
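/* PPC_HA compensates for the sign extension that occurs when the PPC_LO
   half is later added as a signed 16-bit immediate: for example, with
   v = 0x12348000, PPC_HI (v) is 0x1234 and PPC_LO (v) is 0x8000 (which
   adds as -0x8000), so PPC_HA (v) = 0x1235 gives
   (0x1235 << 16) - 0x8000 = 0x12348000. The other *A variants apply the
   same 0x8000 bias before extracting their half. */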
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))
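/* BIT_INSERT replaces just the MASK bits of VAR with the corresponding
   bits of VAL; e.g. BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc)
   below patches the 14-bit DS-form displacement field of a halfword while
   leaving the remaining bits untouched. */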

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
                                                 const char *name,
                                                 Elf64_Addr *const reloc_addr,
                                                 const Elf64_Sym *refsym);

static inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs. */
static inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
                   struct link_map *sym_map,
                   const Elf64_Sym *sym,
                   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}
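
/* TLS_TPREL_VALUE (see dl-tls.h) gives the symbol's offset from the
   thread pointer; roughly, powerpc biases the thread pointer by a fixed
   TLS_TP_OFFSET above the start of the thread's static TLS block, so the
   result may be negative as well as positive. */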

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs. */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
               const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
  /* The function we are calling may not yet have its opd entry relocated. */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it. */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code, force the opd set
         dependency. */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved). MAP is the object containing the reloc. */
static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc,
                  const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg,
                  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
      uint64_t u8;
    } __attribute__ ((__packed__));
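  /* The packed union above is only used to implement the unaligned
     R_PPC64_UADDR16/32/64 cases further down without tripping alignment
     assumptions. */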

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC. */
  struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch. */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = resolve_ifunc (value, map, sym_map);
      /* Fall thru */
    case R_PPC64_JMP_SLOT:
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
                             reloc, reloc_addr, value);
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          reloc_addr[0] = 0;
          reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                           + TLS_DTV_OFFSET);
          return;
#else
          if (sym_map != NULL)
            {
# ifndef SHARED
              CHECK_STATIC_TLS (map, sym_map);
# else
              if (TRY_STATIC_TLS (map, sym_map))
# endif
                {
                  reloc_addr[0] = 0;
                  /* Set up for local dynamic. */
                  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
                                   + TLS_DTV_OFFSET);
                  return;
                }
            }
#endif
        }
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1. */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
         resolve function. */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
          && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
        {
#ifdef RTLD_BOOTSTRAP
          *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
          return;
#else
          if (sym_map != NULL)
            {
              /* This reloc is always preceded by R_PPC64_DTPMOD64. */
# ifndef SHARED
              assert (HAVE_STATIC_TLS (map, sym_map));
# else
              if (HAVE_STATIC_TLS (map, sym_map))
# endif
                {
                  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
                  return;
                }
            }
#endif
        }
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct. */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
        *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall through. */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall through. */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
        Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
        if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
                         || (delta & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
        BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
        /* This can happen in trace mode when an object could not be found. */
        return;
      if (dont_expect (sym->st_size > refsym->st_size
                       || (GLRO(dl_verbose)
                           && sym->st_size < refsym->st_size)))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("%s: Symbol `%s' has different size" \
                            " in shared object," \
                            " consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (char *) value,
              MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
        _dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
        _dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
        _dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
        if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
          _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
        Elf64_Word insn = *(Elf64_Word *) reloc_addr;
        BIT_INSERT (insn, value, 0xfffc);
        if (r_type != R_PPC64_ADDR14)
          {
            insn &= ~(1 << 21);
            if (r_type == R_PPC64_ADDR14_BRTAKEN)
              insn |= 1 << 21;
            if ((insn & (0x14 << 21)) == (0x04 << 21))
              insn |= 0x02 << 21;
            else if ((insn & (0x14 << 21)) == (0x10 << 21))
              insn |= 0x08 << 21;
          }
        *(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

static inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this. */
}


#endif /* RESOLVE_MAP */