/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
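
/* These low-level lock macros can be overridden before this file is
   included a second time: pthread_mutex_cond_lock.c redefines them
   (and defines NO_INCR) and then includes this file again to build the
   internal __pthread_mutex_cond_lock variant used by the condition
   variable implementation.  */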

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;


int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
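
  /* Ownership is recorded as the kernel thread id (the tid field of
     the TCB), not as a pthread_t; the recursive and error-checking
     paths below compare it against __owner to detect relocking.  */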

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
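
      /* A note on the update above: __spins keeps a moving average of
         how long contended acquisitions spun.  Adding
         (cnt - __spins) / 8 moves the estimate one eighth of the way
         toward the latest observation, so the spin budget tracks
         recent contention without overreacting to one outlier.  */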
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
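

/* Everything beyond the four basic mutex kinds (robust, priority
   inheritance, priority protection) is handled in this separately
   compiled slow path, which keeps the common __pthread_mutex_lock
   entry small and its branches predictable.  */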
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
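
      /* list_op_pending tells the kernel, via the robust list
         registered with the set_robust_list system call, which lock
         this thread is about to hold; if the thread dies between here
         and ENQUEUE_MUTEX, the kernel can still mark the futex with
         FUTEX_OWNER_DIED for the next locker.  */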

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
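
          /* A caller that gets EOWNERDEAD above owns the lock but must
             repair the protected state and call
             pthread_mutex_consistent before unlocking; otherwise the
             mutex degrades to notrecoverable.  A minimal user-side
             sketch (not part of this file; repair_shared_state is a
             hypothetical application hook):

               int e = pthread_mutex_lock (&m);
               if (e == EOWNERDEAD)
                 {
                   repair_shared_state ();
                   pthread_mutex_consistent (&m);
                 }
          */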

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
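
      /* The do/while above retries as long as the futex word still has
         FUTEX_OWNER_DIED set: each new owner may itself die before we
         run, so acquisition and the owner-died check repeat until the
         lock is obtained in a clean state.  */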

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);
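
            /* FUTEX_LOCK_PI leaves queuing and priority inheritance to
               the kernel: on success the kernel has already stored our
               TID in the futex word, so no user-space CAS loop is
               needed on this contended path.  */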

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                        || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
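
      /* In the priority-protection protocol above, the low bits of
         __lock encode the state next to the ceiling value: ceilval
         means unlocked, ceilval | 1 locked without waiters, and
         ceilval | 2 locked with possible waiters, which is why the
         loops CAS between exactly those three values.  */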

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
  ++mutex->__data.__nusers;

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif
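

/* When this file is compiled a second time with NO_INCR defined (see
   pthread_mutex_cond_lock.c, which includes it after redefining the
   LLL_MUTEX_* macros), the helper below adjusts the user-space state
   of a PI mutex that the kernel acquired on this thread's behalf
   during a condition-variable wait.  */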
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif