/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

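/* Release MUTEX for the four basic (non-robust, non-PI, non-PP)
   kinds.  DECR is nonzero when the user count in __nusers should be
   dropped as well; internal callers (e.g. the condition variable
   code) pass zero to leave that count unchanged.  Every other mutex
   kind is handed off to __pthread_mutex_unlock_full.  */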
int
internal_function attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}


static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
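  /* Robust, priority-inheritance, and priority-protected mutexes are
     not supported here yet, so unlocking any such type fails with
     EINVAL.  The NPTL implementation below is kept for reference but
     is compiled out.  */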
#if 1
  return EINVAL;
#else
  int newowner = 0;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
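      /* Record the pending dequeue in list_op_pending first, so the
         kernel can complete or discard the list operation if this
         thread dies while it is in progress.  */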
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and make all waiters aware.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the
         mutex to not violate the mutex destruction requirements (see
         lll_unlock).  */
      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      int private = (robust
                     ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                     : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or
         our TID is not the value of __lock anymore, in which case we
         let the kernel take care of the situation.  Use release MO in
         the CAS to synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              INTERNAL_SYSCALL_DECL (__err);
              INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                                __lll_private_flag (FUTEX_UNLOCK_PI,
                                                    private));
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      int newval, oldval;
      do
        {
          oldval = mutex->__data.__lock;
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
                                                   newval, oldval));

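      /* A lock value above 1 (after masking off the priority-ceiling
         bits) means other threads are waiting; wake one of them.  */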
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
#endif
}


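/* The public entry point always drops the user count as well.  */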
int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)