1 /* Copyright (C) 2002-2013 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
28 #include "kernel-features.h"
31 #define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
33 /* Unless otherwise specified, the thread "register" is going to be
34 initialized with a pointer to the TCB. */
40 # define ARCH_CLONE __clone
44 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
45 /* Pointer to the corresponding variable in libc. */
/* NOTE(review): when the TCB cannot hold the multiple-threads flag
   itself, libpthread keeps a hidden pointer to libc's copy instead.
   The matching #endif is on a line elided from this excerpt.  */
46 int *__libc_multiple_threads_ptr attribute_hidden
;
/* NOTE(review): the fields below appear to be the interior of FreeBSD's
   `struct thr_param' as passed to thr_new(2); the struct header and its
   closing brace are on lines elided from this excerpt — confirm against
   the full file before relying on the layout.  */
52 int (*start_func
)(void *); /* thread entry function. */
53 void *arg
; /* argument for entry function. */
54 char *stack_base
; /* stack base address. */
55 size_t stack_size
; /* stack size. */
56 char *tls_base
; /* tls base address. */
57 size_t tls_size
; /* tls size. */
58 long *child_tid
; /* address to store new TID. */
59 long *parent_tid
; /* parent accesses the new TID here. */
60 int flags
; /* thread flags. */
61 struct rtprio
*rtp
; /* Real-time scheduling priority */
62 void *spare
[3]; /* TODO: cpu affinity mask etc. */
/* do_clone: ask the kernel to start the new thread described by PD.
   NOTE(review): this is an elided excerpt — the return type, the
   enclosing braces, the preprocessor selection between the two creation
   paths visible below, and the declarations of `stopped', `p' and `res'
   all fall on lines not shown here.  Confirm against the full file.  */
67 do_clone (struct pthread
*pd
, const struct pthread_attr
*attr
,
68 int clone_flags
, int (*fct
) (void *), STACK_VARIABLES_PARMS
,
/* For a stopped start, take PD's lock now; the child will block trying
   to acquire it until create_thread releases it.  */
77 if (__glibc_unlikely (stopped
!= 0))
78 /* We make sure the thread does not run far by forcing it to get a
79 lock. We lock it here too so that the new thread cannot continue
80 until we tell it to. */
81 lll_lock (pd
->lock
, LLL_PRIVATE
);
83 /* One more thread. We cannot have the thread do this itself, since it
84 might exist but not have been scheduled yet by the time we've returned
85 and need to check the value to behave correctly. We must do it before
86 creating the thread, in case it does get scheduled first and then
87 might mistakenly think it was the only thread. In the failure case,
88 we momentarily store a false value; this doesn't matter because there
89 is no kosher thing a signal handler interrupting us right here can do
90 that cares whether the thread count is correct. */
91 atomic_increment (&__nptl_nthreads
);
/* Linux-style path: clone(2) via ARCH_CLONE; &pd->tid is passed as
   both parent- and child-TID pointers (CLONE_PARENT_SETTID /
   CLONE_CHILD_CLEARTID).  NOTE(review): both this `rc' and the thr_new
   `rc' below cannot be live in the same build — presumably the elided
   lines hold the #ifdef that picks one path.  */
93 int rc
= ARCH_CLONE (fct
, STACK_VARIABLES_ARGS
, clone_flags
,
94 pd
, &pd
->tid
, TLS_VALUE
, &pd
->tid
);
/* FreeBSD-style path: describe the thread in a thr_param block `p'
   (declared on an elided line) and zero it first so spare/unused
   fields are well defined.  */
97 memset(&p
, 0, sizeof(p
));
100 p
.stack_base
= stackaddr
; /* first in STACK_VARIABLES_ARGS */
101 p
.stack_size
= stacksize
; /* second in STACK_VARIABLES_ARGS */
102 p
.tls_base
= (char*)pd
;
103 p
.child_tid
= &(pd
->ktid
);
/* Hand the parameter block to the kernel: thr_new(2) takes the struct
   pointer and its size.  */
105 int rc
= INLINE_SYSCALL(thr_new
, 2, &p
, sizeof(p
));
/* Creation failed: undo the optimistic thread-count bump, wake any
   setxid waiter, and release the stack.  */
115 if (__glibc_unlikely (rc
== -1))
117 atomic_decrement (&__nptl_nthreads
); /* Oops, we lied for a second. */
120 /* Perhaps a thread wants to change the IDs and if waiting
121 for this stillborn thread. */
122 if (__builtin_expect (atomic_exchange_acq (&pd
->setxid_futex
, 0)
124 lll_futex_wake (&pd
->setxid_futex
, 1, LLL_PRIVATE
);
126 /* Free the resources. */
127 __deallocate_stack (pd
);
129 /* We have to translate error codes. */
130 return errno
== ENOMEM
? EAGAIN
: errno
;
/* NOTE(review): the #warning below is in the original source — the
   port's scheduling-parameter setup was left unfinished.  */
132 #warning set scheduling parameters
134 /* Now we have the possibility to set scheduling parameters etc. */
135 if (__glibc_unlikely (stopped
!= 0))
137 INTERNAL_SYSCALL_DECL (err
);
140 /* Set the affinity mask if necessary. */
141 if (attr
->cpuset
!= NULL
)
143 res
= INTERNAL_SYSCALL (sched_setaffinity
, err
, 3, pd
->tid
,
144 attr
->cpusetsize
, attr
->cpuset
);
146 if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res
, err
)))
148 /* The operation failed. We have to kill the thread. First
149 send it the cancellation signal. */
150 INTERNAL_SYSCALL_DECL (err2
);
152 (void) INTERNAL_SYSCALL (tgkill
, err2
, 3,
153 THREAD_GETMEM (THREAD_SELF
, pid
),
156 /* We do not free the stack here because the canceled thread
157 itself will do this. */
159 return (INTERNAL_SYSCALL_ERROR_P (res
, err
)
160 ? INTERNAL_SYSCALL_ERRNO (res
, err
)
165 /* Set the scheduling parameters. */
166 if ((attr
->flags
& ATTR_FLAG_NOTINHERITSCHED
) != 0)
168 res
= INTERNAL_SYSCALL (sched_setscheduler
, err
, 3, pd
->tid
,
169 pd
->schedpolicy
, &pd
->schedparam
);
171 if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res
, err
)))
177 /* We now have for sure more than one thread. The main thread might
178 not yet have the flag set. No need to set the global variable
179 again if this is what we use. */
180 THREAD_SETMEM (THREAD_SELF
, header
.multiple_threads
, 1);
/* create_thread: create and start a new thread for PD with attributes
   ATTR.  NOTE(review): elided excerpt — the return type, braces and
   several statements fall on lines not shown here.

   Fix: removed the stray `#define clone_flags 123456' that immediately
   preceded the declaration of the local variable `clone_flags'.  The
   macro would have expanded the declaration to `int 123456 = (...)'
   and every later use of the variable to the literal, breaking the
   build; it does not exist in the upstream source.  */
187 create_thread (struct pthread
*pd
, const struct pthread_attr
*attr
,
188 STACK_VARIABLES_PARMS
)
/* The TCB must already have been set up by the caller.  */
191 assert (pd
->header
.tcb
!= NULL
);
194 /* We rely heavily on various flags the CLONE function understands:
196 CLONE_VM, CLONE_FS, CLONE_FILES
197 These flags select semantics with shared address space and
198 file descriptors according to what POSIX requires.
201 This flag selects the POSIX signal semantics.
204 The sixth parameter to CLONE determines the TLS area for the
208 The kernels writes the thread ID of the newly created thread
209 into the location pointed to by the fifth parameters to CLONE.
211 Note that it would be semantically equivalent to use
212 CLONE_CHILD_SETTID but it is be more expensive in the kernel.
215 The kernels clears the thread ID of a thread that has called
216 sys_exit() in the location pointed to by the seventh parameter
219 The termination signal is chosen to be zero which means no signal
225 int clone_flags
= (CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGNAL
226 | CLONE_SETTLS
| CLONE_PARENT_SETTID
227 | CLONE_CHILD_CLEARTID
| CLONE_SYSVSEM
/* Debugger event reporting: if TD_CREATE is watched, the thread must
   start stopped so the debugger sees it before it runs.  */
231 if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF
, report_events
)))
233 /* The parent thread is supposed to report events. Check whether
234 the TD_CREATE event is needed, too. */
235 const int _idx
= __td_eventword (TD_CREATE
);
236 const uint32_t _mask
= __td_eventmask (TD_CREATE
);
238 if ((_mask
& (__nptl_threads_events
.event_bits
[_idx
]
239 | pd
->eventbuf
.eventmask
.event_bits
[_idx
])) != 0)
241 /* We always must have the thread start stopped. */
242 pd
->stopped_start
= true;
244 /* Create the thread. We always create the thread stopped
245 so that it does not get far before we tell the debugger. */
246 int res
= do_clone (pd
, attr
, clone_flags
, start_thread
,
247 STACK_VARIABLES_ARGS
, 1);
250 /* Now fill in the information about the new thread in
251 the newly created thread's data structure. We cannot let
252 the new thread do this since we don't know whether it was
253 already scheduled when we send the event. */
254 pd
->eventbuf
.eventnum
= TD_CREATE
;
255 pd
->eventbuf
.eventdata
= pd
;
257 /* Enqueue the descriptor. */
259 pd
->nextevent
= __nptl_last_event
;
260 while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event
,
264 /* Now call the function which signals the event. */
265 __nptl_create_event ();
267 /* And finally restart the new thread. */
268 lll_unlock (pd
->lock
, LLL_PRIVATE
);
275 #ifdef NEED_DL_SYSINFO
276 assert (THREAD_SELF_SYSINFO
== THREAD_SYSINFO (pd
));
279 /* Determine whether the newly created threads has to be started
280 stopped since we have to set the scheduling parameters or set the
282 bool stopped
= false;
283 if (attr
!= NULL
&& (attr
->cpuset
!= NULL
284 || (attr
->flags
& ATTR_FLAG_NOTINHERITSCHED
) != 0))
286 pd
->stopped_start
= stopped
;
/* Remember the parent's cancellation state so the child can compare
   against it later.  */
287 pd
->parent_cancelhandling
= THREAD_GETMEM (THREAD_SELF
, cancelhandling
);
289 /* Actually create the thread. */
290 int res
= do_clone (pd
, attr
, clone_flags
, start_thread
,
291 STACK_VARIABLES_ARGS
, stopped
);
/* If the thread was created stopped only for setup (no debugger
   involvement), release it now that setup succeeded.  */
293 if (res
== 0 && stopped
)
294 /* And finally restart the new thread. */
295 lll_unlock (pd
->lock
, LLL_PRIVATE
);