initial commit
[glibc.git] / fbtl / sysdeps / pthread / createthread.c
1 /* Copyright (C) 2002-2013 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
18
#include <errno.h>
#include <sched.h>
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <atomic.h>
#include <ldsodefs.h>
#include <tls.h>

#include "kernel-features.h"
29
30
31 #define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
32
33 /* Unless otherwise specified, the thread "register" is going to be
34 initialized with a pointer to the TCB. */
35 #ifndef TLS_VALUE
36 # define TLS_VALUE pd
37 #endif
38
39 #ifndef ARCH_CLONE
40 # define ARCH_CLONE __clone
41 #endif
42
43
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc.  Used when the TCB
   itself has no multiple_threads field; presumably wired up during
   libpthread initialization -- TODO confirm where it is assigned.  */
int *__libc_multiple_threads_ptr attribute_hidden;
#endif
48
49
/* Opaque forward declaration; only ever passed by pointer to the kernel.  */
struct rtprio;

/* Argument block for the FreeBSD thr_new(2) system call.
   NOTE(review): this is a local redeclaration of the kernel ABI
   structure; its layout must match <sys/thr.h> on the target system
   exactly -- verify against the kernel headers rather than editing
   the fields here.  */
struct thr_param {
    int (*start_func)(void *);	/* thread entry function. */
    void *arg;			/* argument for entry function. */
    char *stack_base;		/* stack base address. */
    size_t stack_size;		/* stack size. */
    char *tls_base;		/* tls base address. */
    size_t tls_size;		/* tls size. */
    long *child_tid;		/* address to store new TID. */
    long *parent_tid;		/* parent accesses the new TID here. */
    int flags;			/* thread flags. */
    struct rtprio *rtp;		/* Real-time scheduling priority */
    void *spare[3];		/* TODO: cpu affinity mask etc. */
};
64
65
66 static int
67 do_clone (struct pthread *pd, const struct pthread_attr *attr,
68 int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
69 int stopped)
70 {
71 #ifdef PREPARE_CREATE
72 PREPARE_CREATE;
73 #endif
74
75 struct thr_param p;
76
77 if (__glibc_unlikely (stopped != 0))
78 /* We make sure the thread does not run far by forcing it to get a
79 lock. We lock it here too so that the new thread cannot continue
80 until we tell it to. */
81 lll_lock (pd->lock, LLL_PRIVATE);
82
83 /* One more thread. We cannot have the thread do this itself, since it
84 might exist but not have been scheduled yet by the time we've returned
85 and need to check the value to behave correctly. We must do it before
86 creating the thread, in case it does get scheduled first and then
87 might mistakenly think it was the only thread. In the failure case,
88 we momentarily store a false value; this doesn't matter because there
89 is no kosher thing a signal handler interrupting us right here can do
90 that cares whether the thread count is correct. */
91 atomic_increment (&__nptl_nthreads);
92 #if 0
93 int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
94 pd, &pd->tid, TLS_VALUE, &pd->tid);
95
96 #else
97 memset(&p, 0, sizeof(p));
98 p.start_func = fct;
99 p.arg = pd;
100 p.stack_base = stackaddr; /* first in STACK_VARIABLES_ARGS */
101 p.stack_size = stacksize; /* second in STACK_VARIABLES_ARGS */
102 p.tls_base = (char*)pd;
103 p.child_tid = &(pd->ktid);
104
105 int rc = INLINE_SYSCALL(thr_new, 2, &p, sizeof(p));
106
107 if (rc)
108 {
109 errno = rc;
110 rc = -1;;
111 }
112 #endif
113
114
115 if (__glibc_unlikely (rc == -1))
116 {
117 atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second. */
118 pd->ktid = 0;
119
120 /* Perhaps a thread wants to change the IDs and if waiting
121 for this stillborn thread. */
122 if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
123 == -2, 0))
124 lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
125
126 /* Free the resources. */
127 __deallocate_stack (pd);
128
129 /* We have to translate error codes. */
130 return errno == ENOMEM ? EAGAIN : errno;
131 }
132 #warning set scheduling parameters
133 #if 0
134 /* Now we have the possibility to set scheduling parameters etc. */
135 if (__glibc_unlikely (stopped != 0))
136 {
137 INTERNAL_SYSCALL_DECL (err);
138 int res = 0;
139
140 /* Set the affinity mask if necessary. */
141 if (attr->cpuset != NULL)
142 {
143 res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
144 attr->cpusetsize, attr->cpuset);
145
146 if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
147 {
148 /* The operation failed. We have to kill the thread. First
149 send it the cancellation signal. */
150 INTERNAL_SYSCALL_DECL (err2);
151 err_out:
152 (void) INTERNAL_SYSCALL (tgkill, err2, 3,
153 THREAD_GETMEM (THREAD_SELF, pid),
154 pd->tid, SIGCANCEL);
155
156 /* We do not free the stack here because the canceled thread
157 itself will do this. */
158
159 return (INTERNAL_SYSCALL_ERROR_P (res, err)
160 ? INTERNAL_SYSCALL_ERRNO (res, err)
161 : 0);
162 }
163 }
164
165 /* Set the scheduling parameters. */
166 if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
167 {
168 res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
169 pd->schedpolicy, &pd->schedparam);
170
171 if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
172 goto err_out;
173 }
174 }
175 #endif
176
177 /* We now have for sure more than one thread. The main thread might
178 not yet have the flag set. No need to set the global variable
179 again if this is what we use. */
180 THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
181
182 return 0;
183 }
184
185
/* Start the new thread described by PD/ATTR.  Decides whether the
   thread must begin in a stopped state (debugger TD_CREATE event
   reporting, or explicit scheduling/affinity attributes), delegates
   the actual creation to do_clone, and finally releases the new
   thread from PD->lock if it was held.  Returns 0 on success or an
   errno-style error code from do_clone.  */
static int
create_thread (struct pthread *pd, const struct pthread_attr *attr,
	       STACK_VARIABLES_PARMS)
{
#ifdef TLS_TCB_AT_TP
  assert (pd->header.tcb != NULL);
#endif

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
	These flags select semantics with shared address space and
	file descriptors according to what POSIX requires.

     CLONE_SIGNAL
	This flag selects the POSIX signal semantics.

     CLONE_SETTLS
	The sixth parameter to CLONE determines the TLS area for the
	new thread.

     CLONE_PARENT_SETTID
	The kernels writes the thread ID of the newly created thread
	into the location pointed to by the fifth parameters to CLONE.

	Note that it would be semantically equivalent to use
	CLONE_CHILD_SETTID but it is be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
	The kernels clears the thread ID of a thread that has called
	sys_exit() in the location pointed to by the seventh parameter
	to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.

     NOTE(review): the description above applies to the Linux clone
     path, which is currently disabled; the active thr_new path in
     do_clone ignores clone_flags entirely, hence the arbitrary
     placeholder value below (flagged by the #warning).  */
#if 1
#define clone_flags 123456
#warning clone
#else
  int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGNAL
		     | CLONE_SETTLS | CLONE_PARENT_SETTID
		     | CLONE_CHILD_CLEARTID | CLONE_SYSVSEM
		     | 0);
#endif

  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.  Check whether
	 the TD_CREATE event is needed, too.  */
      const int _idx = __td_eventword (TD_CREATE);
      const uint32_t _mask = __td_eventmask (TD_CREATE);

      if ((_mask & (__nptl_threads_events.event_bits[_idx]
		    | pd->eventbuf.eventmask.event_bits[_idx])) != 0)
	{
	  /* We always must have the thread start stopped.  */
	  pd->stopped_start = true;

	  /* Create the thread.  We always create the thread stopped
	     so that it does not get far before we tell the debugger.  */
	  int res = do_clone (pd, attr, clone_flags, start_thread,
			      STACK_VARIABLES_ARGS, 1);
	  if (res == 0)
	    {
	      /* Now fill in the information about the new thread in
		 the newly created thread's data structure.  We cannot let
		 the new thread do this since we don't know whether it was
		 already scheduled when we send the event.  */
	      pd->eventbuf.eventnum = TD_CREATE;
	      pd->eventbuf.eventdata = pd;

	      /* Enqueue the descriptor on the global event list with a
		 CAS loop; other threads may be pushing concurrently.  */
	      do
		pd->nextevent = __nptl_last_event;
	      while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
							   pd, pd->nextevent)
		     != 0);

	      /* Now call the function which signals the event.  */
	      __nptl_create_event ();

	      /* And finally restart the new thread.  */
	      lll_unlock (pd->lock, LLL_PRIVATE);
	    }

	  return res;
	}
    }

#ifdef NEED_DL_SYSINFO
  assert (THREAD_SELF_SYSINFO == THREAD_SYSINFO (pd));
#endif

  /* Determine whether the newly created threads has to be started
     stopped since we have to set the scheduling parameters or set the
     affinity.  */
  bool stopped = false;
  if (attr != NULL && (attr->cpuset != NULL
		       || (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
    stopped = true;
  pd->stopped_start = stopped;
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Actually create the thread.  */
  int res = do_clone (pd, attr, clone_flags, start_thread,
		      STACK_VARIABLES_ARGS, stopped);

  if (res == 0 && stopped)
    /* And finally restart the new thread.  */
    lll_unlock (pd->lock, LLL_PRIVATE);

  return res;
}