/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <pthreadP.h>
#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>
#include <list.h>
#include <fork.h>
#include <version.h>
#include <shlib-compat.h>
#include <smp.h>
#include <lowlevellock.h>
#include <kernel-features.h>
#include <libc-internal.h>

/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#warning TODO whole file
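/* The Linux-specific feature probes below (set_robust_list and
   FUTEX_CLOCK_REALTIME) are stubbed out on this port via "#if 1"; the
   original NPTL logic is kept in the dead branch for reference.  */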
#if 1
# define set_robust_list_not_avail() do { } while (0)
#else
#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if the set_robust_list system call is not available.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if the FUTEX_CLOCK_REALTIME flag is available.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
# define __set_futex_clock_realtime() do { } while (0)
#endif
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#ifdef SHARED
static
#else
extern
#endif
void __nptl_set_robust (struct pthread *);

#ifdef SHARED
static void nptl_freeres (void);


static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
    .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
# endif
    .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
    .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
    .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
    .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
    .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
    .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
    .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
    .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
    .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
    .ptr_pthread_attr_getscope = __pthread_attr_getscope,
    .ptr_pthread_attr_setscope = __pthread_attr_setscope,
    .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
    .ptr_pthread_condattr_init = __pthread_condattr_init,
    .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
    .ptr___pthread_cond_destroy = __pthread_cond_destroy,
    .ptr___pthread_cond_init = __pthread_cond_init,
    .ptr___pthread_cond_signal = __pthread_cond_signal,
    .ptr___pthread_cond_wait = __pthread_cond_wait,
    .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
# if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_3_2)
    .ptr___pthread_cond_broadcast_2_0 = __pthread_cond_broadcast_2_0,
    .ptr___pthread_cond_destroy_2_0 = __pthread_cond_destroy_2_0,
    .ptr___pthread_cond_init_2_0 = __pthread_cond_init_2_0,
    .ptr___pthread_cond_signal_2_0 = __pthread_cond_signal_2_0,
    .ptr___pthread_cond_wait_2_0 = __pthread_cond_wait_2_0,
    .ptr___pthread_cond_timedwait_2_0 = __pthread_cond_timedwait_2_0,
# endif
    .ptr_pthread_equal = __pthread_equal,
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_self = __pthread_self,
    .ptr___pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
    .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
    .ptr___pthread_once = __pthread_once,
    .ptr___pthread_rwlock_rdlock = __pthread_rwlock_rdlock,
    .ptr___pthread_rwlock_wrlock = __pthread_rwlock_wrlock,
    .ptr___pthread_rwlock_unlock = __pthread_rwlock_unlock,
    .ptr___pthread_key_create = __pthread_key_create,
    .ptr___pthread_getspecific = __pthread_getspecific,
    .ptr___pthread_setspecific = __pthread_setspecific,
    .ptr__pthread_cleanup_push_defer = __pthread_cleanup_push_defer,
    .ptr__pthread_cleanup_pop_restore = __pthread_cleanup_pop_restore,
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres,
    .ptr_set_robust = __nptl_set_robust
  };
# define ptr_pthread_functions &pthread_functions
#else
# define ptr_pthread_functions NULL
#endif


#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}


static
#endif
void
__nptl_set_robust (struct pthread *self)
{
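  /* Registering the robust-mutex list with the kernel via
     set_robust_list is Linux-specific, so it is disabled on this
     port.  */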
#if 0
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
		    sizeof (struct robust_list_head));
#endif
}


/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  This function could be invoked for other signals,
     and the signal could have been sent from another process.  Both are
     incorrect and might even be a security problem.  Try to catch as
     many bogus invocations as possible.  */
  if (sig != SIGCANCEL
      || si->si_pid != __getpid ()
#if 1
      )
#else
      || si->si_code != SI_TKILL)
#endif
    return;

  struct pthread *self = THREAD_SELF;

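  /* Atomically set the CANCELING and CANCELED bits in CANCELHANDLING,
     retrying the compare-and-exchange until it succeeds or the thread
     turns out to be exiting already.  */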
  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      /* We are canceled now.  When canceled by another thread this flag
	 is already set, but if the signal is sent directly (internally or
	 from another process) it has to be done here.  */
      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
	/* Already canceled or exiting.  */
	break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (curval == oldval)
	{
	  /* Set the return value.  */
	  THREAD_SETMEM (self, result, PTHREAD_CANCELED);

	  /* Make sure asynchronous cancellation is still enabled.  */
	  if ((newval & CANCELTYPE_BITMASK) != 0)
	    /* Run the registered destructors and terminate the thread.  */
	    __do_cancel ();

	  break;
	}

      oldval = curval;
    }
}

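/* Command block for the setxid broadcast, shared with the
   setuid/setgid/etc. implementations in libc.  */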
struct xid_command *__xidcmd attribute_hidden;

/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations to
   tell each thread to call the respective setxid syscall on itself.  This is
   the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
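  /* The Linux-specific safety check and the setxid syscall invocation
     are disabled on this port; only the SETXID flag bookkeeping below
     is carried out.  */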
#if 0
  /* Safety check.  This function could be invoked for other signals,
     and the signal could have been sent from another process.  Both are
     incorrect and might even be a security problem.  Try to catch as
     many bogus invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;
  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
			__xidcmd->id[1], __xidcmd->id[2]);
#endif
  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
					  flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake ((unsigned int *) &__xidcmd->cntr, 1, LLL_PRIVATE);
}


/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
#if 1
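  /* FreeBSD's thr_self syscall stores the kernel thread id of the
     calling thread; the Linux path (set_tid_address) is kept below for
     reference.  */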
  INLINE_SYSCALL (thr_self, 1, &pd->ktid);
#else
  INTERNAL_SYSCALL_DECL (err);
  pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
#endif
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				  - offsetof (pthread_mutex_t,
					      __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			      sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

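  /* The futex feature probes below (FUTEX_PRIVATE_FLAG and
     FUTEX_CLOCK_REALTIME) are Linux-specific and disabled on this
     port.  */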
#if 0
#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally), so it is
     beneficial to run this test once, this early.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			     FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes were introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
#endif
#endif
  /* Pretend the initial thread's stack block spans from 0 up to
     __libc_stack_end.  The recorded size is bigger than the stack really
     is, but it is good enough for the purposes of unwind.c and
     pt-longjmp.c.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  struct sigaction sa;
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  __sigemptyset (&sa.sa_mask);

  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);

  /* Install the handler to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;

  (void) __libc_sigaction (SIGSETXID, &sa, NULL);

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
  __sigaddset (&sa.sa_mask, SIGCANCEL);
  __sigaddset (&sa.sa_mask, SIGSETXID);
  INLINE_SYSCALL (sigprocmask, 3, SIG_UNBLOCK, &sa.sa_mask, NULL);

  /* Get the size and alignment requirements of the static TLS block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;
  else if (limit.rlim_cur > (4 * ARCH_STACK_DEFAULT_SIZE))
    /* The system limit is unusably high.
       Use the maximal size acceptable.  */
    limit.rlim_cur = (4 * ARCH_STACK_DEFAULT_SIZE);

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     and keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
strong_alias (__pthread_initialize_minimal_internal,
	      __pthread_initialize_minimal)


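/* Return the minimum stack size usable with attributes ATTR: one page
   of overhead, the static TLS block, PTHREAD_STACK_MIN, and the
   requested guard area.  */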
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  return (GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN
	  + iattr->guardsize);
}