/* Machine-dependent ELF dynamic relocation inline functions.  Alpha version.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

/* This was written in the absence of an ABI -- don't expect
   it to remain unchanged.  */

#ifndef dl_machine_h
#define dl_machine_h 1

#define ELF_MACHINE_NAME "alpha"

#include <string.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>


/* Mask identifying addresses reserved for the user program,
   where the dynamic linker should not map anything.  */
#define ELF_MACHINE_USER_ADDRESS_MASK	0x120000000UL
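/* (0x120000000 is traditionally the base address at which Alpha executables
   are linked, so this keeps ld.so's own mappings clear of the region the
   program itself expects to occupy.)  */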

/* Translate a processor specific dynamic tag to the index in l_info array.  */
#define DT_ALPHA(x) (DT_ALPHA_##x - DT_LOPROC + DT_NUM)
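/* For example, DT_ALPHA (PLTRO) yields the l_info slot for DT_ALPHA_PLTRO:
   processor-specific tags are stored past the DT_NUM standard entries,
   indexed by their offset from DT_LOPROC.  */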

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_ALPHA;
}

/* Return the link-time address of _DYNAMIC.  The multiple-got-capable
   linker no longer allocates the first .got entry for this.  But not to
   worry, no special tricks are needed.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
#ifndef NO_AXP_MULTI_GOT_LD
  return (Elf64_Addr) &_DYNAMIC;
#else
  register Elf64_Addr *gp __asm__ ("$29");
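  /* gp conventionally points 0x8000 bytes past the start of the GOT, so,
     counted in 8-byte entries, gp[-4096] is the first GOT slot, which the
     old single-GOT linker reserved for the address of _DYNAMIC.  */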
  return gp[-4096];
#endif
}

/* Return the run-time load address of the shared object.  */

static inline Elf64_Addr
elf_machine_load_address (void)
{
  /* This relies on the compiler using gp-relative addresses
     for static symbols.  */
  static void *dot = &dot;
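  /* Since our own RELATIVE relocations have not been applied yet, the
     stored word still holds the link-time address of `dot', while &dot
     is computed at run time via gp and yields the run-time address.
     Their difference is therefore the load offset.  */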
  return (void *)&dot - dot;
}

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
			   int lazy, int profile)
{
  extern char _dl_runtime_resolve_new[] attribute_hidden;
  extern char _dl_runtime_profile_new[] attribute_hidden;
  extern char _dl_runtime_resolve_old[] attribute_hidden;
  extern char _dl_runtime_profile_old[] attribute_hidden;

  struct pltgot {
    char *resolve;
    struct link_map *link;
  };
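  /* These two words are what the PLT entry-0 stub reads when a lazy call
     traps into the dynamic linker: RESOLVE is the fixup routine it jumps
     to, and LINK identifies the calling object for that routine.  */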

  struct pltgot *pg;
  long secureplt;
  char *resolve;

  if (map->l_info[DT_JMPREL] == 0 || !lazy)
    return lazy;

  /* Check to see if we're using the read-only plt form.  */
  secureplt = map->l_info[DT_ALPHA(PLTRO)] != 0;

  /* If the binary uses the read-only secure plt format, PG points to
     the .got.plt section, which is the right place for ld.so to place
     its hooks.  Otherwise, PG is currently pointing at the start of
     the plt; the hooks go at offset 16.  */
  pg = (struct pltgot *) D_PTR (map, l_info[DT_PLTGOT]);
  pg += !secureplt;

  /* These functions will be called to perform the relocation.  They're
     not declared as functions to convince the compiler to use gp-relative
     relocations for them.  */
  if (secureplt)
    resolve = _dl_runtime_resolve_new;
  else
    resolve = _dl_runtime_resolve_old;

  if (__builtin_expect (profile, 0))
    {
      if (secureplt)
	resolve = _dl_runtime_profile_new;
      else
	resolve = _dl_runtime_profile_old;

      if (GLRO(dl_profile) && _dl_name_match_p (GLRO(dl_profile), map))
	{
	  /* This is the object we are looking for.  Say that we really
	     want profiling and the timers are started.  */
	  GL(dl_profile_map) = map;
	}
    }

  pg->resolve = resolve;
  pg->link = map;

  return lazy;
}

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_START asm ("\
	.section .text						\n\
	.set at							\n\
	.globl _start						\n\
	.ent _start						\n\
_start:								\n\
	.frame $31,0,$31,0					\n\
	br $gp, 0f						\n\
0:	ldgp $gp, 0($gp)					\n\
	.prologue 0						\n\
	/* Pass pointer to argument block to _dl_start.  */	\n\
	mov $sp, $16						\n\
	bsr $26, _dl_start !samegp				\n\
	.end _start						\n\
	/* FALLTHRU */						\n\
	.globl _dl_start_user					\n\
	.ent _dl_start_user					\n\
_dl_start_user:							\n\
	.frame $31,0,$31,0					\n\
	.prologue 0						\n\
	/* Save the user entry point address in s0.  */	\n\
	mov $0, $9						\n\
	/* The special initializer gets called with the stack	\n\
	   just as the application's entry point will see it;	\n\
	   it can switch stacks if it moves these contents	\n\
	   over.  */						\n\
" RTLD_START_SPECIAL_INIT "					\n\
	/* Call _dl_init(_dl_loaded, argc, argv, envp) to run	\n\
	   initializers.  */					\n\
	ldah $16, _rtld_local($gp) !gprelhigh			\n\
	ldq $16, _rtld_local($16) !gprellow			\n\
	ldq $17, 0($sp)						\n\
	lda $18, 8($sp)						\n\
	s8addq $17, 8, $19					\n\
	addq $19, $18, $19					\n\
	bsr $26, _dl_init !samegp				\n\
	/* Pass our finalizer function to the user in $0.  */	\n\
	ldah $0, _dl_fini($gp) !gprelhigh			\n\
	lda $0, _dl_fini($0) !gprellow				\n\
	/* Jump to the user's entry point.  */			\n\
	mov $9, $27						\n\
	jmp ($9)						\n\
	.end _dl_start_user					\n\
	.set noat						\n\
.previous");

#ifndef RTLD_START_SPECIAL_INIT
#define RTLD_START_SPECIAL_INIT /* nothing */
#endif

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry
   or TLS variables, so undefined references should not be allowed
   to define the value.

   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve
   to one of the main executable's symbols, as for a COPY reloc.
   This is unused on Alpha.  */

# define elf_machine_type_class(type)	\
  (((type) == R_ALPHA_JMP_SLOT		\
    || (type) == R_ALPHA_DTPMOD64	\
    || (type) == R_ALPHA_DTPREL64	\
    || (type) == R_ALPHA_TPREL64) * ELF_RTYPE_CLASS_PLT)
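/* The multiplication by ELF_RTYPE_CLASS_PLT is just a branch-free way of
   saying "the class bit if TYPE is one of the four types above, else 0".  */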

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_ALPHA_JMP_SLOT

/* We define an initialization function.  This is called very early in
   _dl_sysdep_start.  */
#define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
    /* Avoid an empty string which would disturb us.  */
    GLRO(dl_platform) = NULL;
}

/* Fix up the instructions of a PLT entry to invoke the function
   rather than the dynamic linker.  */
static inline Elf64_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
		       const Elf64_Rela *reloc,
		       Elf64_Addr *got_addr, Elf64_Addr value)
{
  const Elf64_Rela *rela_plt;
  Elf64_Word *plte;
  long int edisp;

  /* Store the value we are going to load.  */
  *got_addr = value;

  /* If this binary uses the read-only secure plt format, we're done.  */
  if (map->l_info[DT_ALPHA(PLTRO)])
    return value;

  /* Otherwise we have to modify the plt entry in place to do the branch.  */

  /* Recover the PLT entry address by calculating reloc's index into the
     .rela.plt, and finding that entry in the .plt.  */
  rela_plt = (const Elf64_Rela *) D_PTR (map, l_info[DT_JMPREL]);
  plte = (Elf64_Word *) (D_PTR (map, l_info[DT_PLTGOT]) + 32);
  plte += 3 * (reloc - rela_plt);

  /* Find the displacement from the plt entry to the function.  */
  edisp = (long int) (value - (Elf64_Addr)&plte[3]) / 4;
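  /* The displacement is counted in instructions relative to the updated PC,
     i.e. the word after the br slot at &plte[2]; the 21-bit signed branch
     field reaches +/- 1M instructions, hence the range check below.  */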

  if (edisp >= -0x100000 && edisp < 0x100000)
    {
      /* If we are in range, use br to perfect branch prediction and
	 elide the dependency on the address load.  This case happens,
	 e.g., when a shared library call is resolved to the same library.  */

      int hi, lo;
      hi = value - (Elf64_Addr)&plte[0];
      lo = (short int) hi;
      hi = (hi - lo) >> 16;
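      /* This is the usual ldah/lda split: LO is the sign-extended low 16
	 bits and HI absorbs the carry, so (HI << 16) + LO reconstructs the
	 32-bit offset.  E.g. an offset of 0x18000 splits into lo = -0x8000
	 and hi = 2.  */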

      /* Emit "lda $27,lo($27)" */
      plte[1] = 0x237b0000 | (lo & 0xffff);

      /* Emit "br $31,function" */
      plte[2] = 0xc3e00000 | (edisp & 0x1fffff);

      /* Think about thread-safety -- the previous instructions must be
	 committed to memory before the first is overwritten.  */
      __asm__ __volatile__("wmb" : : : "memory");
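      /* A concurrent caller that still sees the old first instruction takes
	 the old path back into the resolver, which is harmless; once it sees
	 the new ldah, the two words rewritten above must already be
	 visible.  */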

      /* Emit "ldah $27,hi($27)" */
      plte[0] = 0x277b0000 | (hi & 0xffff);
    }
  else
    {
      /* Don't bother with the hint since we already know the hint is
	 wrong.  Eliding it prevents the wrong page from getting pulled
	 into the cache.  */

      int hi, lo;
      hi = (Elf64_Addr)got_addr - (Elf64_Addr)&plte[0];
      lo = (short)hi;
      hi = (hi - lo) >> 16;

      /* Emit "ldq $27,lo($27)" */
      plte[1] = 0xa77b0000 | (lo & 0xffff);

      /* Emit "jmp $31,($27)" */
      plte[2] = 0x6bfb0000;

      /* Think about thread-safety -- the previous instructions must be
	 committed to memory before the first is overwritten.  */
      __asm__ __volatile__("wmb" : : : "memory");

      /* Emit "ldah $27,hi($27)" */
      plte[0] = 0x277b0000 | (hi & 0xffff);
    }

  /* At this point, if we've been doing runtime resolution, Icache is dirty.
     This will be taken care of in _dl_runtime_resolve.  If instead we are
     doing this as part of non-lazy startup relocation, that bit of code
     hasn't made it into Icache yet, so there's nothing to clean up.  */

  return value;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  return value + reloc->r_addend;
}

/* Names of the architecture-specific auditing callback functions.  */
#define ARCH_LA_PLTENTER alpha_gnu_pltenter
#define ARCH_LA_PLTEXIT alpha_gnu_pltexit

#endif /* !dl_machine_h */

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */
static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
		  const Elf64_Rela *reloc,
		  const Elf64_Sym *sym,
		  const struct r_found_version *version,
		  void *const reloc_addr_arg,
		  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  unsigned long int const r_type = ELF64_R_TYPE (reloc->r_info);

  /* We cannot use a switch here because we cannot locate the switch
     jump table until we've self-relocated.  */

#if !defined RTLD_BOOTSTRAP
  if (__builtin_expect (r_type == R_ALPHA_RELATIVE, 0))
    {
      /* XXX Make some timings.  Maybe it's preferable to test for
	 unaligned access and only do it the complex way if necessary.  */
      Elf64_Addr reloc_addr_val;

      /* Load value without causing unaligned trap.  */
      memcpy (&reloc_addr_val, reloc_addr_arg, 8);
      reloc_addr_val += map->l_addr;

      /* Store value without causing unaligned trap.  */
      memcpy (reloc_addr_arg, &reloc_addr_val, 8);
    }
  else
#endif
  if (__builtin_expect (r_type == R_ALPHA_NONE, 0))
    return;
  else
    {
      struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
					      r_type);
      Elf64_Addr sym_value;
      Elf64_Addr sym_raw_value;

      sym_raw_value = sym_value = reloc->r_addend;
      if (sym_map)
	{
	  sym_raw_value += sym->st_value;
	  sym_value += SYMBOL_ADDRESS (sym_map, sym, true);
	}

      if (r_type == R_ALPHA_GLOB_DAT)
	*reloc_addr = sym_value;
      else if (r_type == R_ALPHA_JMP_SLOT)
	elf_machine_fixup_plt (map, 0, 0, 0, reloc, reloc_addr, sym_value);
#ifndef RTLD_BOOTSTRAP
      else if (r_type == R_ALPHA_REFQUAD)
	{
	  /* Store value without causing unaligned trap.  */
	  memcpy (reloc_addr_arg, &sym_value, 8);
	}
#endif
      else if (r_type == R_ALPHA_DTPMOD64)
	{
# ifdef RTLD_BOOTSTRAP
	  /* During startup the dynamic linker is always index 1.  */
	  *reloc_addr = 1;
# else
	  /* Get the information from the link map returned by the
	     resolve function.  */
	  if (sym_map != NULL)
	    *reloc_addr = sym_map->l_tls_modid;
# endif
	}
      else if (r_type == R_ALPHA_DTPREL64)
	{
# ifndef RTLD_BOOTSTRAP
	  /* During relocation all TLS symbols are defined and used.
	     Therefore the offset is already correct.  */
	  *reloc_addr = sym_raw_value;
# endif
	}
      else if (r_type == R_ALPHA_TPREL64)
	{
# ifdef RTLD_BOOTSTRAP
	  *reloc_addr = sym_raw_value + map->l_tls_offset;
# else
	  if (sym_map)
	    {
	      CHECK_STATIC_TLS (map, sym_map);
	      *reloc_addr = sym_raw_value + sym_map->l_tls_offset;
	    }
# endif
	}
      else
	_dl_reloc_bad_type (map, r_type, 0);
    }
}

/* Let do-rel.h know that on Alpha if l_addr is 0, all RELATIVE relocs
   can be skipped.  */
#define ELF_MACHINE_REL_RELATIVE 1

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  /* XXX Make some timings.  Maybe it's preferable to test for
     unaligned access and only do it the complex way if necessary.  */
  Elf64_Addr reloc_addr_val;

  /* Load value without causing unaligned trap.  */
  memcpy (&reloc_addr_val, reloc_addr_arg, 8);
  reloc_addr_val += l_addr;

  /* Store value without causing unaligned trap.  */
  memcpy (reloc_addr_arg, &reloc_addr_val, 8);
}

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
		      Elf64_Addr l_addr, const Elf64_Rela *reloc,
		      int skip_ifunc)
{
  Elf64_Addr * const reloc_addr = (void *)(l_addr + reloc->r_offset);
  unsigned long int const r_type = ELF64_R_TYPE (reloc->r_info);

  if (r_type == R_ALPHA_JMP_SLOT)
    {
      /* Perform a RELATIVE reloc on the .got entry that transfers
	 to the .plt.  */
      *reloc_addr += l_addr;
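      /* The .got entry originally holds the link-time address of the PLT
	 stub; adding l_addr makes the first call land in the loaded stub,
	 which then enters the resolver hook installed by
	 elf_machine_runtime_setup.  */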
    }
  else if (r_type == R_ALPHA_NONE)
    return;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */