1 /*
2 * pthread_mutex_timedlock.c
3 *
4 * Description:
5 * This translation unit implements mutual exclusion (mutex) primitives.
6 *
7 * --------------------------------------------------------------------------
8 *
9 * Pthreads4w - POSIX Threads for Windows
10 * Copyright 1998 John E. Bossom
11 * Copyright 1999-2018, Pthreads4w contributors
12 *
13 * Homepage: https://sourceforge.net/projects/pthreads4w/
14 *
15 * The current list of contributors is contained
16 * in the file CONTRIBUTORS included with the source
17 * code distribution. The list can also be seen at the
18 * following World Wide Web location:
19 *
20 * https://sourceforge.net/p/pthreads4w/wiki/Contributors/
21 *
22 * Licensed under the Apache License, Version 2.0 (the "License");
23 * you may not use this file except in compliance with the License.
24 * You may obtain a copy of the License at
25 *
26 * http://www.apache.org/licenses/LICENSE-2.0
27 *
28 * Unless required by applicable law or agreed to in writing, software
29 * distributed under the License is distributed on an "AS IS" BASIS,
30 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
31 * See the License for the specific language governing permissions and
32 * limitations under the License.
33 */
34
35 #ifdef HAVE_CONFIG_H
36 # include <config.h>
37 #endif
38
39 #include "pthread.h"
40 #include "implement.h"
41
42
43 static INLINE int
__ptw32_timed_eventwait(HANDLE event,const struct timespec * abstime)44 __ptw32_timed_eventwait (HANDLE event, const struct timespec *abstime)
45 /*
46 * ------------------------------------------------------
47 * DESCRIPTION
48 * This function waits on an event until signaled or until
49 * abstime passes.
50 * If abstime has passed when this routine is called then
51 * it returns a result to indicate this.
52 *
53 * If 'abstime' is a NULL pointer then this function will
54 * block until it can successfully decrease the value or
55 * until interrupted by a signal.
56 *
57 * This routine is not a cancellation point.
58 *
59 * RESULTS
60 * 0 successfully signaled,
61 * ETIMEDOUT abstime passed
62 * EINVAL 'event' is not a valid event,
63 *
64 * ------------------------------------------------------
65 */
66 {
67
68 DWORD milliseconds;
69 DWORD status;
70
71 if (event == NULL)
72 {
73 return EINVAL;
74 }
75 else
76 {
77 if (abstime == NULL)
78 {
79 milliseconds = INFINITE;
80 }
81 else
82 {
83 /*
84 * Calculate timeout as milliseconds from current system time.
85 */
86 milliseconds = __ptw32_relmillisecs (abstime);
87 }
88
89 status = WaitForSingleObject (event, milliseconds);
90
91 if (status != WAIT_OBJECT_0)
92 {
93 if (status == WAIT_TIMEOUT)
94 {
95 return ETIMEDOUT;
96 }
97 else
98 {
99 return EINVAL;
100 }
101 }
102 }
103
104 return 0;
105
106 } /* __ptw32_timed_semwait */
107
108
/*
 * pthread_mutex_timedlock: lock 'mutex', giving up once 'abstime'
 * passes (the timed blocking itself is delegated to
 * __ptw32_timed_eventwait on mx->event).
 *
 * Returns:
 *   0                mutex acquired
 *   EINVAL           mutex is invalid / failed static initialisation
 *   ETIMEDOUT        abstime passed before the lock could be taken
 *   EDEADLK          errorcheck-type mutex already owned by this thread
 *   EOWNERDEAD       robust mutex acquired after its owner died
 *   ENOTRECOVERABLE  robust mutex state can no longer be recovered
 *
 * NOTE(review): lock_idx protocol inferred from the exchanges below —
 * 0 = unlocked, 1 = locked with no waiters, -1 = locked with possible
 * waiters blocked on mx->event. Confirm against pthread_mutex_unlock,
 * which must SetEvent when it sees -1.
 */
int
pthread_mutex_timedlock (pthread_mutex_t * mutex,
			 const struct timespec *abstime)
{
  /*
   * Let the system deal with invalid pointers.
   */
  pthread_mutex_t mx = *mutex;
  int kind;
  int result = 0;

  if (mx == NULL)
    {
      return EINVAL;
    }

  /*
   * We do a quick check to see if we need to do more work
   * to initialise a static mutex. We check
   * again inside the guarded section of __ptw32_mutex_check_need_init()
   * to avoid race conditions.
   */
  if (mx >= PTHREAD_ERRORCHECK_MUTEX_INITIALIZER)
    {
      if ((result = __ptw32_mutex_check_need_init (mutex)) != 0)
	{
	  return (result);
	}
      /* Re-read: initialisation replaced the static sentinel with a
       * real mutex object. */
      mx = *mutex;
    }

  kind = mx->kind;

  if (kind >= 0)
    {
      /* Non-robust types (robust mutexes are encoded with negative kind). */
      if (mx->kind == PTHREAD_MUTEX_NORMAL)
	{
	  /* Fast path: atomically claim the lock by swapping in 1. */
	  if ((__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
		       (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		       (__PTW32_INTERLOCKED_LONG) 1) != 0)
	    {
	      /*
	       * Contended: advertise a waiter by swapping in -1, then
	       * block on the event until the owner releases us or the
	       * timeout expires. A swap that returns 0 means we took
	       * the lock (and left it marked -1, i.e. "may have waiters").
	       */
	      while ((__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
			      (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			      (__PTW32_INTERLOCKED_LONG) -1) != 0)
		{
		  if (0 != (result = __ptw32_timed_eventwait (mx->event, abstime)))
		    {
		      return result;
		    }
		}
	    }
	}
      else
	{
	  /* Errorcheck or recursive: owner identity matters. */
	  pthread_t self = pthread_self();

	  /* Uncontended claim must be a compare-exchange (0 -> 1) so a
	   * relock by the current owner is detected below. */
	  if ((__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
		       (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
		       (__PTW32_INTERLOCKED_LONG) 1,
		       (__PTW32_INTERLOCKED_LONG) 0) == 0)
	    {
	      mx->recursive_count = 1;
	      mx->ownerThread = self;
	    }
	  else
	    {
	      if (pthread_equal (mx->ownerThread, self))
		{
		  if (mx->kind == PTHREAD_MUTEX_RECURSIVE)
		    {
		      mx->recursive_count++;
		    }
		  else
		    {
		      /* Errorcheck mutex relocked by its owner. */
		      return EDEADLK;
		    }
		}
	      else
		{
		  /* Held by another thread: wait as in the NORMAL case. */
		  while ((__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
				  (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
				  (__PTW32_INTERLOCKED_LONG) -1) != 0)
		    {
		      if (0 != (result = __ptw32_timed_eventwait (mx->event, abstime)))
			{
			  return result;
			}
		    }

		  mx->recursive_count = 1;
		  mx->ownerThread = self;
		}
	    }
	}
    }
  else
    {
      /*
       * Robust types
       * All types record the current owner thread.
       * The mutex is added to a per thread list when ownership is acquired.
       */
      __ptw32_robust_state_t* statePtr = &mx->robustNode->stateInconsistent;

      /* Interlocked add of 0 serves as an atomic read of the robust state. */
      if ((__PTW32_INTERLOCKED_LONG)__PTW32_ROBUST_NOTRECOVERABLE == __PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
						 (__PTW32_INTERLOCKED_LONGPTR)statePtr,
						 (__PTW32_INTERLOCKED_LONG)0))
	{
	  result = ENOTRECOVERABLE;
	}
      else
	{
	  pthread_t self = pthread_self();

	  kind = -kind - 1; /* Convert to non-robust range */

	  if (PTHREAD_MUTEX_NORMAL == kind)
	    {
	      if ((__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
			   (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			   (__PTW32_INTERLOCKED_LONG) 1) != 0)
		{
		  /*
		   * Contended: each pass first lets this thread inherit
		   * the lock from a dead owner (inherit returns EOWNERDEAD
		   * and breaks the loop), then retries the -1 swap; only
		   * if both fail do we block on the event.
		   */
		  while (0 == (result = __ptw32_robust_mutex_inherit(mutex))
			 && (__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
				     (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
				     (__PTW32_INTERLOCKED_LONG) -1) != 0)
		    {
		      if (0 != (result = __ptw32_timed_eventwait (mx->event, abstime)))
			{
			  return result;
			}
		      /* Re-check recoverability after every wakeup. */
		      if ((__PTW32_INTERLOCKED_LONG)__PTW32_ROBUST_NOTRECOVERABLE ==
				  __PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
					    (__PTW32_INTERLOCKED_LONGPTR)statePtr,
					    (__PTW32_INTERLOCKED_LONG)0))
			{
			  /* Unblock the next thread */
			  SetEvent(mx->event);
			  result = ENOTRECOVERABLE;
			  break;
			}
		    }

		  if (0 == result || EOWNERDEAD == result)
		    {
		      /*
		       * Add mutex to the per-thread robust mutex currently-held list.
		       * If the thread terminates, all mutexes in this list will be unlocked.
		       */
		      __ptw32_robust_mutex_add(mutex, self);
		    }
		}
	    }
	  else
	    {
	      /* NOTE(review): this 'self' shadows the one declared above;
	       * both hold pthread_self(), so behavior is unaffected. */
	      pthread_t self = pthread_self();

	      if (0 == (__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_COMPARE_EXCHANGE_LONG(
			   (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
			   (__PTW32_INTERLOCKED_LONG) 1,
			   (__PTW32_INTERLOCKED_LONG) 0))
		{
		  mx->recursive_count = 1;
		  /*
		   * Add mutex to the per-thread robust mutex currently-held list.
		   * If the thread terminates, all mutexes in this list will be unlocked.
		   */
		  __ptw32_robust_mutex_add(mutex, self);
		}
	      else
		{
		  if (pthread_equal (mx->ownerThread, self))
		    {
		      if (PTHREAD_MUTEX_RECURSIVE == kind)
			{
			  mx->recursive_count++;
			}
		      else
			{
			  /* Robust errorcheck mutex relocked by its owner. */
			  return EDEADLK;
			}
		    }
		  else
		    {
		      /* Held by another thread: wait, allowing inheritance
		       * from a dead owner as in the robust NORMAL case. */
		      while (0 == (result = __ptw32_robust_mutex_inherit(mutex))
			     && (__PTW32_INTERLOCKED_LONG) __PTW32_INTERLOCKED_EXCHANGE_LONG(
					 (__PTW32_INTERLOCKED_LONGPTR) &mx->lock_idx,
					 (__PTW32_INTERLOCKED_LONG) -1) != 0)
			{
			  if (0 != (result = __ptw32_timed_eventwait (mx->event, abstime)))
			    {
			      return result;
			    }
			}

		      if ((__PTW32_INTERLOCKED_LONG)__PTW32_ROBUST_NOTRECOVERABLE ==
				  __PTW32_INTERLOCKED_EXCHANGE_ADD_LONG(
					    (__PTW32_INTERLOCKED_LONGPTR)statePtr,
					    (__PTW32_INTERLOCKED_LONG)0))
			{
			  /* Unblock the next thread */
			  SetEvent(mx->event);
			  result = ENOTRECOVERABLE;
			}
		      else if (0 == result || EOWNERDEAD == result)
			{
			  mx->recursive_count = 1;
			  /*
			   * Add mutex to the per-thread robust mutex currently-held list.
			   * If the thread terminates, all mutexes in this list will be unlocked.
			   */
			  __ptw32_robust_mutex_add(mutex, self);
			}
		    }
		}
	    }
	}
    }

  return result;
}
330