/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
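
/* This file implements pthread_mutex_unlock.  The common kinds
   (timed/normal, recursive, adaptive, and error-checking) are handled
   on the fast path in __pthread_mutex_unlock_usercnt below; every kind
   carrying extra protocol bits (robust, priority-inheritance, and
   priority-protected mutexes) is delegated to the slow path in
   __pthread_mutex_unlock_full.  */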
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
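
/* The DECR argument controls whether __nusers is decremented on a
   successful unlock.  Regular callers pass 1; internal callers that
   release the lock only temporarily pass 0 so the user count stays
   balanced.  The condition variable code is such a caller:

     __pthread_mutex_unlock_usercnt (mutex, 0);   in pthread_cond_wait  */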
int
internal_function attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_unlock_full (mutex, decr);
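
  /* Any kind bits outside PTHREAD_MUTEX_KIND_MASK_NP (robustness,
     priority inheritance, priority protection) were dispatched to the
     slow path above, so TYPE is now one of the four basic kinds.  Note
     the unusual __builtin_expect below: rather than a boolean, it hints
     that the expected value of TYPE is PTHREAD_MUTEX_TIMED_NP, making
     the plain timed/normal mutex the predicted case.  */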
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
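  /* Adaptive mutexes spin briefly before sleeping on the lock side;
     unlocking them is identical to the normal case, hence the jump.  */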
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }

  return 0;
}
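
/* The slow path below manipulates the futex word in
   mutex->__data.__lock directly.  For the robust and PI kinds the
   kernel defines the word's layout (see the Linux futex ABI); roughly:

     bits  0-29  FUTEX_TID_MASK    TID of the current owner
     bit     30  FUTEX_OWNER_DIED  previous owner died holding the lock
     bit     31  FUTEX_WAITERS     other threads block on the futex

   This is a summary for orientation; the authoritative definitions
   live in lowlevellock.h and the kernel headers.  */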
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware of this.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
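
      /* The sequence above (set list_op_pending, DEQUEUE_MUTEX, unlock,
         clear list_op_pending) is the robust-list protocol:
         list_op_pending is published before the list is modified and
         cleared only after the futex word is released, so if this
         thread is killed at any point in between, the kernel can still
         find the mutex and mark it with FUTEX_OWNER_DIED.  */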
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_non_robust;
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware of this.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          DEQUEUE_MUTEX (mutex);
        }
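
      /* Setting bit 0 in the list_op_pending pointer above tells the
         kernel's exit-time robust-list walker that this entry refers to
         a PI futex, so it applies FUTEX_UNLOCK_PI semantics when
         cleaning up after a dead owner.  */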
    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;
      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      int private = (robust
                     ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                     : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              INTERNAL_SYSCALL_DECL (__err);
              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                                __lll_private_flag (FUTEX_UNLOCK_PI,
                                                    private));
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
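
      /* In the uncontended case the futex word still holds exactly our
         TID, so the release-CAS to 0 is a complete unlock and no syscall
         is needed.  FUTEX_UNLOCK_PI is required only when the kernel must
         pick the new owner and undo any priority boosting.  */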
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;
      /* Unlock.  */
      int newval;
      int oldval;
      do
        {
          oldval = mutex->__data.__lock;
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
                                                   newval, oldval));
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);
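
      /* For priority-protected mutexes the ceiling lives in the bits
         above PTHREAD_MUTEX_PRIO_CEILING_SHIFT of the futex word; the
         CAS above clears only the lock portion and keeps the ceiling.
         The call to __pthread_tpp_change_priority (oldprio, -1) then
         drops this ceiling from the thread's priority-ceiling
         bookkeeping and recomputes its scheduling priority.  */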
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}

int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
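
/* A minimal caller-side illustration (not part of libc): an
   error-checking mutex takes the fast path above and reports EPERM for
   a non-owner, via the __owner check in __pthread_mutex_unlock_usercnt:

     pthread_mutexattr_t attr;
     pthread_mutex_t m;
     pthread_mutexattr_init (&attr);
     pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &attr);

     pthread_mutex_lock (&m);
     assert (pthread_mutex_unlock (&m) == 0);      unlock by owner: OK
     assert (pthread_mutex_unlock (&m) == EPERM);  no longer the owner  */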
strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)