1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2015, Joyent, Inc.
26 * Copyright (c) 2016 by Delphix. All rights reserved.
27 */
28
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <sys/rtpriocntl.h>
32 #include <sys/sdt.h>
33 #include <atomic.h>
34
35 #if defined(DEBUG)
36 #define INCR32(x) (((x) != UINT32_MAX)? (x)++ : 0)
37 #define INCR(x) ((x)++)
38 #define DECR(x) ((x)--)
39 #define MAXINCR(m, x) ((m < ++x)? (m = x) : 0)
40 #else
41 #define INCR32(x)
42 #define INCR(x)
43 #define DECR(x)
44 #define MAXINCR(m, x)
45 #endif
46
47 /*
48 * This mutex is initialized to be held by lwp#1.
49 * It is used to block a thread that has returned from a mutex_lock()
50 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
51 */
52 mutex_t stall_mutex = DEFAULTMUTEX;
53
54 static int shared_mutex_held(mutex_t *);
55 static int mutex_queuelock_adaptive(mutex_t *);
56 static void mutex_wakeup_all(mutex_t *);
57
58 /*
59 * Lock statistics support functions.
60 */
61 void
62 record_begin_hold(tdb_mutex_stats_t *msp)
63 {
64 tdb_incr(msp->mutex_lock);
65 msp->mutex_begin_hold = gethrtime();
66 }
67
68 hrtime_t
69 record_hold_time(tdb_mutex_stats_t *msp)
70 {
71 hrtime_t now = gethrtime();
72
73 if (msp->mutex_begin_hold)
74 msp->mutex_hold_time += now - msp->mutex_begin_hold;
75 msp->mutex_begin_hold = 0;
76 return (now);
77 }
78
79 /*
80 * Called once at library initialization.
81 */
82 void
83 mutex_setup(void)
84 {
85 if (set_lock_byte(&stall_mutex.mutex_lockw))
86 thr_panic("mutex_setup() cannot acquire stall_mutex");
87 stall_mutex.mutex_owner = (uintptr_t)curthread;
88 }
89
90 /*
91 * The default spin count of 1000 is experimentally determined.
92 * On sun4u machines with any number of processors it could be raised
93 * to 10,000 but that (experimentally) makes almost no difference.
94 * The environment variable:
95 * _THREAD_ADAPTIVE_SPIN=count
96 * can be used to override and set the count in the range [0 .. 1,000,000].
97 */
98 int thread_adaptive_spin = 1000;
99 uint_t thread_max_spinners = 100;
100 int thread_queue_verify = 0;
101 static int ncpus;
102
103 /*
104 * Distinguish spinning for queue locks from spinning for regular locks.
105 * We try harder to acquire queue locks by spinning.
106 * The environment variable:
107 * _THREAD_QUEUE_SPIN=count
108 * can be used to override and set the count in the range [0 .. 1,000,000].
109 */
110 int thread_queue_spin = 10000;
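/*
 * Illustrative usage of the two spin tunables above (a sketch, not part
 * of this file): both are read from the environment at startup, so an
 * experiment might be run as
 *
 *	$ _THREAD_ADAPTIVE_SPIN=20000 _THREAD_QUEUE_SPIN=50000 ./myapp
 *
 * Per the comments above, values are expected to lie in the range
 * [0 .. 1,000,000]; handling of out-of-range values is not shown here.
 */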
111
112 #define ALL_ATTRIBUTES \
113 (LOCK_RECURSIVE | LOCK_ERRORCHECK | \
114 LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT | \
115 LOCK_ROBUST)
116
117 /*
118 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
119 * augmented by zero or more of the flags:
120 * LOCK_RECURSIVE
121 * LOCK_ERRORCHECK
122 * LOCK_PRIO_INHERIT
123 * LOCK_PRIO_PROTECT
124 * LOCK_ROBUST
125 */
126 #pragma weak _mutex_init = mutex_init
127 /* ARGSUSED2 */
128 int
129 mutex_init(mutex_t *mp, int type, void *arg)
130 {
131 int basetype = (type & ~ALL_ATTRIBUTES);
132 const pcclass_t *pccp;
133 int error = 0;
134 int ceil;
135
136 if (basetype == USYNC_PROCESS_ROBUST) {
137 /*
138 * USYNC_PROCESS_ROBUST is a deprecated historical type.
139 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
140 * retain the USYNC_PROCESS_ROBUST flag so we can return
141 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
142 * mutexes will ever draw ELOCKUNMAPPED).
143 */
144 type |= (USYNC_PROCESS | LOCK_ROBUST);
145 basetype = USYNC_PROCESS;
146 }
147
148 if (type & LOCK_PRIO_PROTECT)
149 pccp = get_info_by_policy(SCHED_FIFO);
150 if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
151 (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
152 == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
153 ((type & LOCK_PRIO_PROTECT) &&
154 ((ceil = *(int *)arg) < pccp->pcc_primin ||
155 ceil > pccp->pcc_primax))) {
156 error = EINVAL;
157 } else if (type & LOCK_ROBUST) {
158 /*
159 * Callers of mutex_init() with the LOCK_ROBUST attribute
160 * are required to pass an initially all-zero mutex.
161 * Multiple calls to mutex_init() are allowed; all but
162 * the first return EBUSY. A call to mutex_init() is
163 * allowed to make an inconsistent robust lock consistent
164 * (for historical usage, even though the proper interface
165 * for this is mutex_consistent()). Note that we use
166 * atomic_or_16() to set the LOCK_INITED flag so as
167 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
168 */
169 if (!(mp->mutex_flag & LOCK_INITED)) {
170 mp->mutex_type = (uint8_t)type;
171 atomic_or_16(&mp->mutex_flag, LOCK_INITED);
172 mp->mutex_magic = MUTEX_MAGIC;
173 } else if (type != mp->mutex_type ||
174 ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
175 error = EINVAL;
176 } else if (mutex_consistent(mp) != 0) {
177 error = EBUSY;
178 }
179 /* register a process robust mutex with the kernel */
180 if (basetype == USYNC_PROCESS)
181 register_lock(mp);
182 } else {
183 (void) memset(mp, 0, sizeof (*mp));
184 mp->mutex_type = (uint8_t)type;
185 mp->mutex_flag = LOCK_INITED;
186 mp->mutex_magic = MUTEX_MAGIC;
187 }
188
189 if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
190 mp->mutex_ceiling = ceil;
191 }
192
193 /*
194 * This should be at the beginning of the function,
195 * but for the sake of old broken applications that
196 * do not have proper alignment for their mutexes
197 * (and don't check the return code from mutex_init),
198 * we put it here, after initializing the mutex regardless.
199 */
200 if (error == 0 &&
201 ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
202 curthread->ul_misaligned == 0)
203 error = EINVAL;
204
205 return (error);
206 }
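
/*
 * Illustrative callers of mutex_init() (a sketch, not part of this file,
 * and compiled out).  Per the code above, 'arg' is dereferenced only when
 * LOCK_PRIO_PROTECT is set, and a LOCK_ROBUST mutex must start all-zero.
 * The ceiling value below is hypothetical.
 */
#if 0
static void
mutex_init_examples(void)
{
	static mutex_t robust_mx;	/* static => initially all-zero */
	mutex_t ceiling_mx;
	int ceiling = 30;		/* hypothetical ceiling value */

	/* process-shared robust mutex; 'arg' is unused here */
	(void) mutex_init(&robust_mx, USYNC_PROCESS | LOCK_ROBUST, NULL);

	/* priority-ceiling mutex; 'arg' points to the ceiling */
	(void) mutex_init(&ceiling_mx, USYNC_THREAD | LOCK_PRIO_PROTECT,
	    &ceiling);
}
#endif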
207
208 /*
209 * Delete mp from list of ceiling mutexes owned by curthread.
210 * Return 1 if the head of the chain was updated.
211 */
212 int
213 _ceil_mylist_del(mutex_t *mp)
214 {
215 ulwp_t *self = curthread;
216 mxchain_t **mcpp;
217 mxchain_t *mcp;
218
219 for (mcpp = &self->ul_mxchain;
220 (mcp = *mcpp) != NULL;
221 mcpp = &mcp->mxchain_next) {
222 if (mcp->mxchain_mx == mp) {
223 *mcpp = mcp->mxchain_next;
224 lfree(mcp, sizeof (*mcp));
225 return (mcpp == &self->ul_mxchain);
226 }
227 }
228 return (0);
229 }
230
231 /*
232 * Add mp to the list of ceiling mutexes owned by curthread.
233 * Return ENOMEM if no memory could be allocated.
234 */
235 int
236 _ceil_mylist_add(mutex_t *mp)
237 {
238 ulwp_t *self = curthread;
239 mxchain_t *mcp;
240
241 if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
242 return (ENOMEM);
243 mcp->mxchain_mx = mp;
244 mcp->mxchain_next = self->ul_mxchain;
245 self->ul_mxchain = mcp;
246 return (0);
247 }
248
249 /*
250 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
251 */
252 static void
253 set_rt_priority(ulwp_t *self, int prio)
254 {
255 pcparms_t pcparm;
256
257 pcparm.pc_cid = self->ul_rtclassid;
258 ((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
259 ((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
260 (void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
261 }
262
263 /*
264 * Inherit priority from ceiling.
265 * This changes the effective priority, not the assigned priority.
266 */
267 void
268 _ceil_prio_inherit(int prio)
269 {
270 ulwp_t *self = curthread;
271
272 self->ul_epri = prio;
273 set_rt_priority(self, prio);
274 }
275
276 /*
277 * Waive inherited ceiling priority. Inherit from head of owned ceiling locks
278 * if holding at least one ceiling lock. If no ceiling locks are held at this
279 * point, disinherit completely, reverting to the assigned priority.
280 */
281 void
282 _ceil_prio_waive(void)
283 {
284 ulwp_t *self = curthread;
285 mxchain_t *mcp = self->ul_mxchain;
286 int prio;
287
288 if (mcp == NULL) {
289 prio = self->ul_pri;
290 self->ul_epri = 0;
291 } else {
292 prio = mcp->mxchain_mx->mutex_ceiling;
293 self->ul_epri = prio;
294 }
295 set_rt_priority(self, prio);
296 }
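
/*
 * Worked example of the ceiling protocol above (illustrative only):
 * a thread with assigned priority ul_pri == 10 that acquires a
 * LOCK_PRIO_PROTECT mutex with mutex_ceiling == 30 runs at effective
 * priority 30 via _ceil_prio_inherit(30).  When it releases that mutex,
 * _ceil_prio_waive() either inherits from the head of its remaining
 * ceiling-lock chain or, if the chain is empty, reverts to priority 10.
 */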
297
298 /*
299 * Clear the lock byte. Retain the waiters byte and the spinners byte.
300 * Return the old value of the lock word.
301 */
302 static uint32_t
303 clear_lockbyte(volatile uint32_t *lockword)
304 {
305 uint32_t old;
306 uint32_t new;
307
308 do {
309 old = *lockword;
310 new = old & ~LOCKMASK;
311 } while (atomic_cas_32(lockword, old, new) != old);
312
313 return (old);
314 }
315
316 /*
317 * Same as clear_lockbyte(), but operates on mutex_lockword64.
318 * The mutex_ownerpid field is cleared along with the lock byte.
319 */
320 static uint64_t
321 clear_lockbyte64(volatile uint64_t *lockword64)
322 {
323 uint64_t old;
324 uint64_t new;
325
326 do {
327 old = *lockword64;
328 new = old & ~LOCKMASK64;
329 } while (atomic_cas_64(lockword64, old, new) != old);
330
331 return (old);
332 }
333
334 /*
335 * Similar to set_lock_byte(), which only tries to set the lock byte.
336 * Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
337 * the remaining bytes constant. This atomic operation is required for the
338 * correctness of process-shared robust locks, otherwise there would be
339 * a window of vulnerability in which the lock byte had been set but the
340 * mutex_ownerpid had not yet been set. If the process were to die in
341 * this window of vulnerability (due to some other thread calling exit()
342 * or the process receiving a fatal signal), the mutex would be left locked
343 * but without a process-ID to determine which process was holding the lock.
344 * The kernel would then be unable to mark the robust mutex as LOCK_OWNERDEAD
345 * when the process died. For all other cases of process-shared locks, this
346 * operation is just a convenience, for the sake of common code.
347 *
348 * This operation requires process-shared robust locks to be properly
349 * aligned on an 8-byte boundary, at least on sparc machines, lest the
350 * operation incur an alignment fault. This is automatic when locks
351 * are declared properly using the mutex_t or pthread_mutex_t data types
352 * and the application does not allocate dynamic memory on less than an
353 * 8-byte boundary. See the 'horrible hack' comments below for cases
354 * dealing with such broken applications.
355 */
356 static int
357 set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
358 {
359 uint64_t old;
360 uint64_t new;
361
362 old = *lockword64 & ~LOCKMASK64;
363 new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
364 if (atomic_cas_64(lockword64, old, new) == old)
365 return (LOCKCLEAR);
366
367 return (LOCKSET);
368 }
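
/*
 * Illustrative sketch of the non-atomic sequence that the comment above
 * rules out for robust process-shared locks (not part of this file):
 *
 *	if (set_lock_byte(&mp->mutex_lockw) == 0) {
 *		// Window of vulnerability: if the process dies here, the
 *		// lock byte is set but mutex_ownerpid is still stale, so
 *		// the kernel cannot mark the mutex LOCK_OWNERDEAD.
 *		mp->mutex_ownerpid = udp->pid;
 *	}
 *
 * Combining both updates into the single 64-bit CAS above closes that
 * window.
 */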
369
370 /*
371 * Increment the spinners count in the mutex lock word.
372 * Return 0 on success. Return -1 if the count would overflow.
373 */
374 static int
375 spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
376 {
377 uint32_t old;
378 uint32_t new;
379
380 do {
381 old = *lockword;
382 if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
383 return (-1);
384 new = old + (1 << SPINNERSHIFT);
385 } while (atomic_cas_32(lockword, old, new) != old);
386
387 return (0);
388 }
389
390 /*
391 * Decrement the spinners count in the mutex lock word.
392 * Return the new value of the lock word.
393 */
394 static uint32_t
395 spinners_decr(volatile uint32_t *lockword)
396 {
397 uint32_t old;
398 uint32_t new;
399
400 do {
401 new = old = *lockword;
402 if (new & SPINNERMASK)
403 new -= (1 << SPINNERSHIFT);
404 } while (atomic_cas_32(lockword, old, new) != old);
405
406 return (new);
407 }
408
409 /*
410 * Non-preemptive spin locks. Used by queue_lock().
411 * No lock statistics are gathered for these locks.
412 * No DTrace probes are provided for these locks.
413 */
414 void
415 spin_lock_set(mutex_t *mp)
416 {
417 ulwp_t *self = curthread;
418
419 no_preempt(self);
420 if (set_lock_byte(&mp->mutex_lockw) == 0) {
421 mp->mutex_owner = (uintptr_t)self;
422 return;
423 }
424 /*
425 * Spin for a while, attempting to acquire the lock.
426 */
427 INCR32(self->ul_spin_lock_spin);
428 if (mutex_queuelock_adaptive(mp) == 0 ||
429 set_lock_byte(&mp->mutex_lockw) == 0) {
430 mp->mutex_owner = (uintptr_t)self;
431 return;
432 }
433 /*
434 * Try harder if we were previously at a no-preemption level.
435 */
436 if (self->ul_preempt > 1) {
437 INCR32(self->ul_spin_lock_spin2);
438 if (mutex_queuelock_adaptive(mp) == 0 ||
439 set_lock_byte(&mp->mutex_lockw) == 0) {
440 mp->mutex_owner = (uintptr_t)self;
441 return;
442 }
443 }
444 /*
445 * Give up and block in the kernel for the mutex.
446 */
447 INCR32(self->ul_spin_lock_sleep);
448 (void) ___lwp_mutex_timedlock(mp, NULL, self);
449 }
450
451 void
452 spin_lock_clear(mutex_t *mp)
453 {
454 ulwp_t *self = curthread;
455
456 mp->mutex_owner = 0;
457 if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
458 (void) ___lwp_mutex_wakeup(mp, 0);
459 INCR32(self->ul_spin_lock_wakeup);
460 }
461 preempt(self);
462 }
463
464 /*
465 * Allocate the sleep queue hash table.
466 */
467 void
468 queue_alloc(void)
469 {
470 ulwp_t *self = curthread;
471 uberdata_t *udp = self->ul_uberdata;
472 queue_head_t *qp;
473 void *data;
474 int i;
475
476 /*
477 * No locks are needed; we call here only when single-threaded.
478 */
479 ASSERT(self == udp->ulwp_one);
480 ASSERT(!udp->uberflags.uf_mt);
481 if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
482 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
483 == MAP_FAILED)
484 thr_panic("cannot allocate thread queue_head table");
485 udp->queue_head = qp = (queue_head_t *)data;
486 for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
487 qp->qh_type = (i < QHASHSIZE)? MX : CV;
488 qp->qh_lock.mutex_flag = LOCK_INITED;
489 qp->qh_lock.mutex_magic = MUTEX_MAGIC;
490 qp->qh_hlist = &qp->qh_def_root;
491 #if defined(DEBUG)
492 qp->qh_hlen = 1;
493 qp->qh_hmax = 1;
494 #endif
495 }
496 }
497
498 #if defined(DEBUG)
499
500 /*
501 * Debugging: verify correctness of a sleep queue.
502 */
503 void
504 QVERIFY(queue_head_t *qp)
505 {
506 ulwp_t *self = curthread;
507 uberdata_t *udp = self->ul_uberdata;
508 queue_root_t *qrp;
509 ulwp_t *ulwp;
510 ulwp_t *prev;
511 uint_t index;
512 uint32_t cnt;
513 char qtype;
514 void *wchan;
515
516 ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
517 ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
518 for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
519 cnt++;
520 ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
521 (qrp->qr_head == NULL && qrp->qr_tail == NULL));
522 }
523 ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
524 qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
525 ASSERT(qp->qh_type == qtype);
526 if (!thread_queue_verify)
527 return;
528 /* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
529 for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
530 for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
531 prev = ulwp, ulwp = ulwp->ul_link) {
532 cnt++;
533 if (ulwp->ul_writer)
534 ASSERT(prev == NULL || prev->ul_writer);
535 ASSERT(ulwp->ul_qtype == qtype);
536 ASSERT(ulwp->ul_wchan != NULL);
537 ASSERT(ulwp->ul_sleepq == qp);
538 wchan = ulwp->ul_wchan;
539 ASSERT(qrp->qr_wchan == wchan);
540 index = QUEUE_HASH(wchan, qtype);
541 ASSERT(&udp->queue_head[index] == qp);
542 }
543 ASSERT(qrp->qr_tail == prev);
544 }
545 ASSERT(qp->qh_qlen == cnt);
546 }
547
548 #else /* DEBUG */
549
550 #define QVERIFY(qp)
551
552 #endif /* DEBUG */
553
554 /*
555 * Acquire a queue head.
556 */
557 queue_head_t *
558 queue_lock(void *wchan, int qtype)
559 {
560 uberdata_t *udp = curthread->ul_uberdata;
561 queue_head_t *qp;
562 queue_root_t *qrp;
563
564 ASSERT(qtype == MX || qtype == CV);
565
566 /*
567 * It is possible that we could be called while still single-threaded.
568 * If so, we call queue_alloc() to allocate the queue_head[] array.
569 */
570 if ((qp = udp->queue_head) == NULL) {
571 queue_alloc();
572 qp = udp->queue_head;
573 }
574 qp += QUEUE_HASH(wchan, qtype);
575 spin_lock_set(&qp->qh_lock);
576 for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
577 if (qrp->qr_wchan == wchan)
578 break;
579 if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
580 /* the default queue root is available; use it */
581 qrp = &qp->qh_def_root;
582 qrp->qr_wchan = wchan;
583 ASSERT(qrp->qr_next == NULL);
584 ASSERT(qrp->qr_tail == NULL &&
585 qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
586 }
587 qp->qh_wchan = wchan; /* valid until queue_unlock() is called */
588 qp->qh_root = qrp; /* valid until queue_unlock() is called */
589 INCR32(qp->qh_lockcount);
590 QVERIFY(qp);
591 return (qp);
592 }
593
594 /*
595 * Release a queue head.
596 */
597 void
598 queue_unlock(queue_head_t *qp)
599 {
600 QVERIFY(qp);
601 spin_lock_clear(&qp->qh_lock);
602 }
603
604 /*
605 * For rwlock queueing, we must queue writers ahead of readers of the
606 * same priority. We do this by making writers appear to have a half
607 * point higher priority for purposes of priority comparisons below.
608 */
609 #define CMP_PRIO(ulwp) ((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
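/*
 * Worked example (illustrative): a reader at priority 10 compares as
 * (10 << 1) + 0 == 20 and a writer at priority 10 as (10 << 1) + 1 == 21,
 * so the writer queues ahead of same-priority readers, while a reader at
 * priority 11 (== 22) still queues ahead of both.
 */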
610
611 void
612 enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
613 {
614 queue_root_t *qrp;
615 ulwp_t **ulwpp;
616 ulwp_t *next;
617 int pri = CMP_PRIO(ulwp);
618
619 ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
620 ASSERT(ulwp->ul_sleepq != qp);
621
622 if ((qrp = qp->qh_root) == NULL) {
623 /* use the thread's queue root for the linkage */
624 qrp = &ulwp->ul_queue_root;
625 qrp->qr_next = qp->qh_hlist;
626 qrp->qr_prev = NULL;
627 qrp->qr_head = NULL;
628 qrp->qr_tail = NULL;
629 qrp->qr_wchan = qp->qh_wchan;
630 qrp->qr_rtcount = 0;
631 qrp->qr_qlen = 0;
632 qrp->qr_qmax = 0;
633 qp->qh_hlist->qr_prev = qrp;
634 qp->qh_hlist = qrp;
635 qp->qh_root = qrp;
636 MAXINCR(qp->qh_hmax, qp->qh_hlen);
637 }
638
639 /*
640 * LIFO queue ordering is unfair and can lead to starvation,
641 * but it gives better performance for heavily contended locks.
642 * We use thread_queue_fifo (range is 0..8) to determine
643 * the frequency of FIFO vs LIFO queuing:
644 * 0 : every 256th time (almost always LIFO)
645 * 1 : every 128th time
646 * 2 : every 64th time
647 * 3 : every 32nd time
648 * 4 : every 16th time (the default value, mostly LIFO)
649 * 5 : every 8th time
650 * 6 : every 4th time
651 * 7 : every 2nd time
652 * 8 : every time (never LIFO, always FIFO)
653 * Note that there is always some degree of FIFO ordering.
654 * This breaks livelock conditions that occur in applications
655 * that are written assuming (incorrectly) that threads acquire
656 * locks fairly, that is, in roughly round-robin order.
657 * In any event, the queue is maintained in kernel priority order.
658 *
659 * If force_fifo is non-zero, fifo queueing is forced.
660 * SUSV3 requires this for semaphores.
661 */
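	/*
	 * Worked example of the frequency table above (illustrative):
	 * with the default ul_queue_fifo of 4, the test below,
	 * (((++qp->qh_qcnt << 4) & 0xff) == 0), is true once every
	 * 16 enqueues, so roughly 15 of every 16 enqueues are LIFO.
	 */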
662 if (qrp->qr_head == NULL) {
663 /*
664 * The queue is empty. LIFO/FIFO doesn't matter.
665 */
666 ASSERT(qrp->qr_tail == NULL);
667 ulwpp = &qrp->qr_head;
668 } else if (force_fifo |
669 (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
670 /*
671 * Enqueue after the last thread whose priority is greater
672 * than or equal to the priority of the thread being queued.
673 * Attempt first to go directly onto the tail of the queue.
674 */
675 if (pri <= CMP_PRIO(qrp->qr_tail))
676 ulwpp = &qrp->qr_tail->ul_link;
677 else {
678 for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
679 ulwpp = &next->ul_link)
680 if (pri > CMP_PRIO(next))
681 break;
682 }
683 } else {
684 /*
685 * Enqueue before the first thread whose priority is less
686 * than or equal to the priority of the thread being queued.
687 * Hopefully we can go directly onto the head of the queue.
688 */
689 for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
690 ulwpp = &next->ul_link)
691 if (pri >= CMP_PRIO(next))
692 break;
693 }
694 if ((ulwp->ul_link = *ulwpp) == NULL)
695 qrp->qr_tail = ulwp;
696 *ulwpp = ulwp;
697
698 ulwp->ul_sleepq = qp;
699 ulwp->ul_wchan = qp->qh_wchan;
700 ulwp->ul_qtype = qp->qh_type;
701 if ((ulwp->ul_schedctl != NULL &&
702 ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
703 ulwp->ul_pilocks) {
704 ulwp->ul_rtqueued = 1;
705 qrp->qr_rtcount++;
706 }
707 MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
708 MAXINCR(qp->qh_qmax, qp->qh_qlen);
709 }
710
711 /*
712 * Helper function for queue_slot() and queue_slot_rt().
713 * Try to find a non-suspended thread on the queue.
714 */
715 static ulwp_t **
716 queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
717 {
718 ulwp_t *ulwp;
719 ulwp_t **foundpp = NULL;
720 int priority = -1;
721 ulwp_t *prev;
722 int tpri;
723
724 for (prev = NULL;
725 (ulwp = *ulwpp) != NULL;
726 prev = ulwp, ulwpp = &ulwp->ul_link) {
727 if (ulwp->ul_stop) /* skip suspended threads */
728 continue;
729 tpri = rt? CMP_PRIO(ulwp) : 0;
730 if (tpri > priority) {
731 foundpp = ulwpp;
732 *prevp = prev;
733 priority = tpri;
734 if (!rt)
735 break;
736 }
737 }
738 return (foundpp);
739 }
740
741 /*
742 * For real-time, we search the entire queue because the dispatch
743 * (kernel) priorities may have changed since enqueueing.
744 */
745 static ulwp_t **
746 queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
747 {
748 ulwp_t **ulwpp = ulwpp_org;
749 ulwp_t *ulwp = *ulwpp;
750 ulwp_t **foundpp = ulwpp;
751 int priority = CMP_PRIO(ulwp);
752 ulwp_t *prev;
753 int tpri;
754
755 for (prev = ulwp, ulwpp = &ulwp->ul_link;
756 (ulwp = *ulwpp) != NULL;
757 prev = ulwp, ulwpp = &ulwp->ul_link) {
758 tpri = CMP_PRIO(ulwp);
759 if (tpri > priority) {
760 foundpp = ulwpp;
761 *prevp = prev;
762 priority = tpri;
763 }
764 }
765 ulwp = *foundpp;
766
767 /*
768 * Try not to return a suspended thread.
769 * This mimics the old libthread's behavior.
770 */
771 if (ulwp->ul_stop &&
772 (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
773 foundpp = ulwpp;
774 ulwp = *foundpp;
775 }
776 ulwp->ul_rt = 1;
777 return (foundpp);
778 }
779
780 ulwp_t **
781 queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
782 {
783 queue_root_t *qrp;
784 ulwp_t **ulwpp;
785 ulwp_t *ulwp;
786 int rt;
787
788 ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
789
790 if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
791 *more = 0;
792 return (NULL); /* no lwps on the queue */
793 }
794 rt = (qrp->qr_rtcount != 0);
795 *prevp = NULL;
796 if (ulwp->ul_link == NULL) { /* only one lwp on the queue */
797 *more = 0;
798 ulwp->ul_rt = rt;
799 return (&qrp->qr_head);
800 }
801 *more = 1;
802
803 if (rt) /* real-time queue */
804 return (queue_slot_rt(&qrp->qr_head, prevp));
805 /*
806 * Try not to return a suspended thread.
807 * This mimics the old libthread's behavior.
808 */
809 if (ulwp->ul_stop &&
810 (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
811 ulwp = *ulwpp;
812 ulwp->ul_rt = 0;
813 return (ulwpp);
814 }
815 /*
816 * The common case; just pick the first thread on the queue.
817 */
818 ulwp->ul_rt = 0;
819 return (&qrp->qr_head);
820 }
821
822 /*
823 * Common code for unlinking an lwp from a user-level sleep queue.
824 */
825 void
826 queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
827 {
828 queue_root_t *qrp = qp->qh_root;
829 queue_root_t *nqrp;
830 ulwp_t *ulwp = *ulwpp;
831 ulwp_t *next;
832
833 ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
834 ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);
835
836 DECR(qp->qh_qlen);
837 DECR(qrp->qr_qlen);
838 if (ulwp->ul_rtqueued) {
839 ulwp->ul_rtqueued = 0;
840 qrp->qr_rtcount--;
841 }
842 next = ulwp->ul_link;
843 *ulwpp = next;
844 ulwp->ul_link = NULL;
845 if (qrp->qr_tail == ulwp)
846 qrp->qr_tail = prev;
847 if (qrp == &ulwp->ul_queue_root) {
848 /*
849 * We can't continue to use the unlinked thread's
850 * queue root for the linkage.
851 */
852 queue_root_t *qr_next = qrp->qr_next;
853 queue_root_t *qr_prev = qrp->qr_prev;
854
855 if (qrp->qr_tail) {
856 /* switch to using the last thread's queue root */
857 ASSERT(qrp->qr_qlen != 0);
858 nqrp = &qrp->qr_tail->ul_queue_root;
859 *nqrp = *qrp;
860 if (qr_next)
861 qr_next->qr_prev = nqrp;
862 if (qr_prev)
863 qr_prev->qr_next = nqrp;
864 else
865 qp->qh_hlist = nqrp;
866 qp->qh_root = nqrp;
867 } else {
868 /* empty queue root; just delete from the hash list */
869 ASSERT(qrp->qr_qlen == 0);
870 if (qr_next)
871 qr_next->qr_prev = qr_prev;
872 if (qr_prev)
873 qr_prev->qr_next = qr_next;
874 else
875 qp->qh_hlist = qr_next;
876 qp->qh_root = NULL;
877 DECR(qp->qh_hlen);
878 }
879 }
880 }
881
882 ulwp_t *
883 dequeue(queue_head_t *qp, int *more)
884 {
885 ulwp_t **ulwpp;
886 ulwp_t *ulwp;
887 ulwp_t *prev;
888
889 if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
890 return (NULL);
891 ulwp = *ulwpp;
892 queue_unlink(qp, ulwpp, prev);
893 ulwp->ul_sleepq = NULL;
894 ulwp->ul_wchan = NULL;
895 return (ulwp);
896 }
897
898 /*
899 * Return a pointer to the highest priority thread sleeping on wchan.
900 */
901 ulwp_t *
902 queue_waiter(queue_head_t *qp)
903 {
904 ulwp_t **ulwpp;
905 ulwp_t *prev;
906 int more;
907
908 if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
909 return (NULL);
910 return (*ulwpp);
911 }
912
913 int
914 dequeue_self(queue_head_t *qp)
915 {
916 ulwp_t *self = curthread;
917 queue_root_t *qrp;
918 ulwp_t **ulwpp;
919 ulwp_t *ulwp;
920 ulwp_t *prev;
921 int found = 0;
922
923 ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
924
925 /* find self on the sleep queue */
926 if ((qrp = qp->qh_root) != NULL) {
927 for (prev = NULL, ulwpp = &qrp->qr_head;
928 (ulwp = *ulwpp) != NULL;
929 prev = ulwp, ulwpp = &ulwp->ul_link) {
930 if (ulwp == self) {
931 queue_unlink(qp, ulwpp, prev);
932 self->ul_cvmutex = NULL;
933 self->ul_sleepq = NULL;
934 self->ul_wchan = NULL;
935 found = 1;
936 break;
937 }
938 }
939 }
940
941 if (!found)
942 thr_panic("dequeue_self(): curthread not found on queue");
943
944 return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
945 }
946
947 /*
948 * Called from call_user_handler() and _thrp_suspend() to take
949 * ourself off of our sleep queue so we can grab locks.
950 */
951 void
952 unsleep_self(void)
953 {
954 ulwp_t *self = curthread;
955 queue_head_t *qp;
956
957 /*
958 * Calling enter_critical()/exit_critical() here would lead
959 * to recursion. Just manipulate self->ul_critical directly.
960 */
961 self->ul_critical++;
962 while (self->ul_sleepq != NULL) {
963 qp = queue_lock(self->ul_wchan, self->ul_qtype);
964 /*
965 * We may have been moved from a CV queue to a
966 * mutex queue while we were attempting queue_lock().
967 * If so, just loop around and try again.
968 * dequeue_self() clears self->ul_sleepq.
969 */
970 if (qp == self->ul_sleepq)
971 (void) dequeue_self(qp);
972 queue_unlock(qp);
973 }
974 self->ul_writer = 0;
975 self->ul_critical--;
976 }
977
978 /*
979 * Common code for calling the ___lwp_mutex_timedlock() system call.
980 * Returns with mutex_owner and mutex_ownerpid set correctly.
981 */
982 static int
983 mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
984 {
985 ulwp_t *self = curthread;
986 uberdata_t *udp = self->ul_uberdata;
987 int mtype = mp->mutex_type;
988 hrtime_t begin_sleep;
989 int acquired;
990 int error;
991
992 self->ul_sp = stkptr();
993 self->ul_wchan = mp;
994 if (__td_event_report(self, TD_SLEEP, udp)) {
995 self->ul_td_evbuf.eventnum = TD_SLEEP;
996 self->ul_td_evbuf.eventdata = mp;
997 tdb_event(TD_SLEEP, udp);
998 }
999 if (msp) {
1000 tdb_incr(msp->mutex_sleep);
1001 begin_sleep = gethrtime();
1002 }
1003
1004 DTRACE_PROBE1(plockstat, mutex__block, mp);
1005
1006 for (;;) {
1007 /*
1008 * A return value of EOWNERDEAD or ELOCKUNMAPPED
1009 * means we successfully acquired the lock.
1010 */
1011 if ((error = ___lwp_mutex_timedlock(mp, tsp, self)) != 0 &&
1012 error != EOWNERDEAD && error != ELOCKUNMAPPED) {
1013 acquired = 0;
1014 break;
1015 }
1016
1017 if (mtype & USYNC_PROCESS) {
1018 /*
1019 * Defend against forkall(). We may be the child,
1020 * in which case we don't actually own the mutex.
1021 */
1022 enter_critical(self);
1023 if (mp->mutex_ownerpid == udp->pid) {
1024 exit_critical(self);
1025 acquired = 1;
1026 break;
1027 }
1028 exit_critical(self);
1029 } else {
1030 acquired = 1;
1031 break;
1032 }
1033 }
1034
1035 if (msp)
1036 msp->mutex_sleep_time += gethrtime() - begin_sleep;
1037 self->ul_wchan = NULL;
1038 self->ul_sp = 0;
1039
1040 if (acquired) {
1041 ASSERT(mp->mutex_owner == (uintptr_t)self);
1042 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
1043 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1044 } else {
1045 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
1046 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1047 }
1048
1049 return (error);
1050 }
1051
1052 /*
1053 * Common code for calling the ___lwp_mutex_trylock() system call.
1054 * Returns with mutex_owner and mutex_ownerpid set correctly.
1055 */
1056 int
1057 mutex_trylock_kernel(mutex_t *mp)
1058 {
1059 ulwp_t *self = curthread;
1060 uberdata_t *udp = self->ul_uberdata;
1061 int mtype = mp->mutex_type;
1062 int error;
1063 int acquired;
1064
1065 for (;;) {
1066 /*
1067 * A return value of EOWNERDEAD or ELOCKUNMAPPED
1068 * means we successfully acquired the lock.
1069 */
1070 if ((error = ___lwp_mutex_trylock(mp, self)) != 0 &&
1071 error != EOWNERDEAD && error != ELOCKUNMAPPED) {
1072 acquired = 0;
1073 break;
1074 }
1075
1076 if (mtype & USYNC_PROCESS) {
1077 /*
1078 * Defend against forkall(). We may be the child,
1079 * in which case we don't actually own the mutex.
1080 */
1081 enter_critical(self);
1082 if (mp->mutex_ownerpid == udp->pid) {
1083 exit_critical(self);
1084 acquired = 1;
1085 break;
1086 }
1087 exit_critical(self);
1088 } else {
1089 acquired = 1;
1090 break;
1091 }
1092 }
1093
1094 if (acquired) {
1095 ASSERT(mp->mutex_owner == (uintptr_t)self);
1096 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1097 } else if (error != EBUSY) {
1098 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1099 }
1100
1101 return (error);
1102 }
1103
1104 volatile sc_shared_t *
1105 setup_schedctl(void)
1106 {
1107 ulwp_t *self = curthread;
1108 volatile sc_shared_t *scp;
1109 sc_shared_t *tmp;
1110
1111 if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
1112 !self->ul_vfork && /* not a child of vfork() */
1113 !self->ul_schedctl_called) { /* haven't been called before */
1114 enter_critical(self);
1115 self->ul_schedctl_called = &self->ul_uberdata->uberflags;
1116 if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
1117 self->ul_schedctl = scp = tmp;
1118 exit_critical(self);
1119 }
1120 /*
1121 * Unless the call to setup_schedctl() is surrounded
1122 * by enter_critical()/exit_critical(), the address
1123 * we are returning could be invalid due to a forkall()
1124 * having occurred in another thread.
1125 */
1126 return (scp);
1127 }
1128
1129 /*
1130 * Interfaces from libsched, incorporated into libc.
1131 * libsched.so.1 is now a filter library onto libc.
1132 */
1133 #pragma weak schedctl_lookup = schedctl_init
1134 schedctl_t *
1135 schedctl_init(void)
1136 {
1137 volatile sc_shared_t *scp = setup_schedctl();
1138 return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
1139 }
1140
1141 void
1142 schedctl_exit(void)
1143 {
1144 }
1145
1146 /*
1147 * Contract private interface for java.
1148 * Set up the schedctl data if it doesn't exist yet.
1149 * Return a pointer to the pointer to the schedctl data.
1150 */
1151 volatile sc_shared_t *volatile *
1152 _thr_schedctl(void)
1153 {
1154 ulwp_t *self = curthread;
1155 volatile sc_shared_t *volatile *ptr;
1156
1157 if (self->ul_vfork)
1158 return (NULL);
1159 if (*(ptr = &self->ul_schedctl) == NULL)
1160 (void) setup_schedctl();
1161 return (ptr);
1162 }
1163
1164 /*
1165 * Block signals and attempt to block preemption.
1166 * no_preempt()/preempt() must be used in pairs but can be nested.
1167 */
1168 void
1169 no_preempt(ulwp_t *self)
1170 {
1171 volatile sc_shared_t *scp;
1172
1173 if (self->ul_preempt++ == 0) {
1174 enter_critical(self);
1175 if ((scp = self->ul_schedctl) != NULL ||
1176 (scp = setup_schedctl()) != NULL) {
1177 /*
1178 * Save the pre-existing preempt value.
1179 */
1180 self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
1181 scp->sc_preemptctl.sc_nopreempt = 1;
1182 }
1183 }
1184 }
1185
1186 /*
1187 * Undo the effects of no_preempt().
1188 */
1189 void
1190 preempt(ulwp_t *self)
1191 {
1192 volatile sc_shared_t *scp;
1193
1194 ASSERT(self->ul_preempt > 0);
1195 if (--self->ul_preempt == 0) {
1196 if ((scp = self->ul_schedctl) != NULL) {
1197 /*
1198 * Restore the pre-existing preempt value.
1199 */
1200 scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
1201 if (scp->sc_preemptctl.sc_yield &&
1202 scp->sc_preemptctl.sc_nopreempt == 0) {
1203 yield();
1204 if (scp->sc_preemptctl.sc_yield) {
1205 /*
1206 * Shouldn't happen. This is either
1207 * a race condition or the thread
1208 * just entered the real-time class.
1209 */
1210 yield();
1211 scp->sc_preemptctl.sc_yield = 0;
1212 }
1213 }
1214 }
1215 exit_critical(self);
1216 }
1217 }
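
/*
 * Illustrative pairing of the two functions above (a sketch, not part of
 * this file); the wakeup paths later in this file follow this shape:
 *
 *	no_preempt(self);		// also enters a critical section
 *	(void) __lwp_unpark(lwpid);	// work that must not be delayed
 *	preempt(self);			// may yield() on the way out
 */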
1218
1219 /*
1220 * If a call to preempt() would cause the current thread to yield or to
1221 * take deferred actions in exit_critical(), then unpark the specified
1222 * lwp so it can run while we delay. Return the original lwpid if the
1223 * unpark was not performed, else return zero. The tests are a repeat
1224 * of some of the tests in preempt(), above. This is a statistical
1225 * optimization solely for cond_sleep_queue(), below.
1226 */
1227 static lwpid_t
1228 preempt_unpark(ulwp_t *self, lwpid_t lwpid)
1229 {
1230 volatile sc_shared_t *scp = self->ul_schedctl;
1231
1232 ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
1233 if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
1234 (self->ul_curplease && self->ul_critical == 1)) {
1235 (void) __lwp_unpark(lwpid);
1236 lwpid = 0;
1237 }
1238 return (lwpid);
1239 }
1240
1241 /*
1242 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
1243 * If this fails, return EBUSY and let the caller deal with it.
1244 * If this succeeds, return 0 with mutex_owner set to curthread.
1245 */
1246 static int
1247 mutex_trylock_adaptive(mutex_t *mp, int tryhard)
1248 {
1249 ulwp_t *self = curthread;
1250 int error = EBUSY;
1251 ulwp_t *ulwp;
1252 volatile sc_shared_t *scp;
1253 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
1254 volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
1255 uint32_t new_lockword;
1256 int count = 0;
1257 int max_count;
1258 uint8_t max_spinners;
1259
1260 ASSERT(!(mp->mutex_type & USYNC_PROCESS));
1261
1262 if (MUTEX_OWNED(mp, self))
1263 return (EBUSY);
1264
1265 enter_critical(self);
1266
1267 /* short-cut, not definitive (see below) */
1268 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
1269 ASSERT(mp->mutex_type & LOCK_ROBUST);
1270 error = ENOTRECOVERABLE;
1271 goto done;
1272 }
1273
1274 /*
1275 * Make one attempt to acquire the lock before
1276 * incurring the overhead of the spin loop.
1277 */
1278 if (set_lock_byte(lockp) == 0) {
1279 *ownerp = (uintptr_t)self;
1280 error = 0;
1281 goto done;
1282 }
1283 if (!tryhard)
1284 goto done;
1285 if (ncpus == 0)
1286 ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
1287 if ((max_spinners = self->ul_max_spinners) >= ncpus)
1288 max_spinners = ncpus - 1;
1289 max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
1290 if (max_count == 0)
1291 goto done;
1292
1293 /*
1294 * This spin loop is unfair to lwps that have already dropped into
1295 * the kernel to sleep. They will starve on a highly-contended mutex.
1296 * This is just too bad. The adaptive spin algorithm is intended
1297 * to allow programs with highly-contended locks (that is, broken
1298 * programs) to execute with reasonable speed despite their contention.
1299 * Being fair would reduce the speed of such programs and well-written
1300 * programs will not suffer in any case.
1301 */
1302 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
1303 goto done;
1304 DTRACE_PROBE1(plockstat, mutex__spin, mp);
1305 for (count = 1; ; count++) {
1306 if (*lockp == 0 && set_lock_byte(lockp) == 0) {
1307 *ownerp = (uintptr_t)self;
1308 error = 0;
1309 break;
1310 }
1311 if (count == max_count)
1312 break;
1313 SMT_PAUSE();
1314 /*
1315 * Stop spinning if the mutex owner is not running on
1316 * a processor; it will not drop the lock any time soon
1317 * and we would just be wasting time to keep spinning.
1318 *
1319 * Note that we are looking at another thread (ulwp_t)
1320 * without ensuring that the other thread does not exit.
1321 * The scheme relies on ulwp_t structures never being
1322 * deallocated by the library (the library employs a free
1323 * list of ulwp_t structs that are reused when new threads
1324 * are created) and on schedctl shared memory never being
1325 * deallocated once created via __schedctl().
1326 *
1327 * Thus, the worst that can happen when the spinning thread
1328 * looks at the owner's schedctl data is that it is looking
1329 * at some other thread's schedctl data. This almost never
1330 * happens and is benign when it does.
1331 */
1332 if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
1333 ((scp = ulwp->ul_schedctl) == NULL ||
1334 scp->sc_state != SC_ONPROC))
1335 break;
1336 }
1337 new_lockword = spinners_decr(&mp->mutex_lockword);
1338 if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
1339 /*
1340 * We haven't yet acquired the lock, the lock
1341 * is free, and there are no other spinners.
1342 * Make one final attempt to acquire the lock.
1343 *
1344 * This isn't strictly necessary since mutex_lock_queue()
1345 * (the next action this thread will take if it doesn't
1346 * acquire the lock here) makes one attempt to acquire
1347 * the lock before putting the thread to sleep.
1348 *
1349 * If the next action for this thread (on failure here)
1350 * were not to call mutex_lock_queue(), this would be
1351 * necessary for correctness, to avoid ending up with an
1352 * unheld mutex with waiters but no one to wake them up.
1353 */
1354 if (set_lock_byte(lockp) == 0) {
1355 *ownerp = (uintptr_t)self;
1356 error = 0;
1357 }
1358 count++;
1359 }
1360
1361 done:
1362 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1363 ASSERT(mp->mutex_type & LOCK_ROBUST);
1364 /*
1365 * We shouldn't own the mutex.
1366 * Just clear the lock; everyone has already been waked up.
1367 */
1368 *ownerp = 0;
1369 (void) clear_lockbyte(&mp->mutex_lockword);
1370 error = ENOTRECOVERABLE;
1371 }
1372
1373 exit_critical(self);
1374
1375 if (error) {
1376 if (count) {
1377 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
1378 }
1379 if (error != EBUSY) {
1380 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1381 }
1382 } else {
1383 if (count) {
1384 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
1385 }
1386 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
1387 if (mp->mutex_flag & LOCK_OWNERDEAD) {
1388 ASSERT(mp->mutex_type & LOCK_ROBUST);
1389 error = EOWNERDEAD;
1390 }
1391 }
1392
1393 return (error);
1394 }
1395
1396 /*
1397 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
1398 * The owner field is not set here; the caller (spin_lock_set()) sets it.
1399 */
1400 static int
1401 mutex_queuelock_adaptive(mutex_t *mp)
1402 {
1403 ulwp_t *ulwp;
1404 volatile sc_shared_t *scp;
1405 volatile uint8_t *lockp;
1406 volatile uint64_t *ownerp;
1407 int count = curthread->ul_queue_spin;
1408
1409 ASSERT(mp->mutex_type == USYNC_THREAD);
1410
1411 if (count == 0)
1412 return (EBUSY);
1413
1414 lockp = (volatile uint8_t *)&mp->mutex_lockw;
1415 ownerp = (volatile uint64_t *)&mp->mutex_owner;
1416 while (--count >= 0) {
1417 if (*lockp == 0 && set_lock_byte(lockp) == 0)
1418 return (0);
1419 SMT_PAUSE();
1420 if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
1421 ((scp = ulwp->ul_schedctl) == NULL ||
1422 scp->sc_state != SC_ONPROC))
1423 break;
1424 }
1425
1426 return (EBUSY);
1427 }
1428
1429 /*
1430 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
1431 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
1432 * If this fails, return EBUSY and let the caller deal with it.
1433 * If this succeeds, return 0 with mutex_owner set to curthread
1434 * and mutex_ownerpid set to the current pid.
1435 */
1436 static int
1437 mutex_trylock_process(mutex_t *mp, int tryhard)
1438 {
1439 ulwp_t *self = curthread;
1440 uberdata_t *udp = self->ul_uberdata;
1441 int error = EBUSY;
1442 volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
1443 uint32_t new_lockword;
1444 int count = 0;
1445 int max_count;
1446 uint8_t max_spinners;
1447
1448 #if defined(__sparc) && !defined(_LP64)
1449 /* horrible hack, necessary only on 32-bit sparc */
1450 int fix_alignment_problem =
1451 (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
1452 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST));
1453 #endif
1454
1455 ASSERT(mp->mutex_type & USYNC_PROCESS);
1456
1457 if (shared_mutex_held(mp))
1458 return (EBUSY);
1459
1460 enter_critical(self);
1461
1462 /* short-cut, not definitive (see below) */
1463 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
1464 ASSERT(mp->mutex_type & LOCK_ROBUST);
1465 error = ENOTRECOVERABLE;
1466 goto done;
1467 }
1468
1469 /*
1470 * Make one attempt to acquire the lock before
1471 * incurring the overhead of the spin loop.
1472 */
1473 #if defined(__sparc) && !defined(_LP64)
1474 /* horrible hack, necessary only on 32-bit sparc */
1475 if (fix_alignment_problem) {
1476 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1477 mp->mutex_ownerpid = udp->pid;
1478 mp->mutex_owner = (uintptr_t)self;
1479 error = 0;
1480 goto done;
1481 }
1482 } else
1483 #endif
1484 if (set_lock_byte64(lockp, udp->pid) == 0) {
1485 mp->mutex_owner = (uintptr_t)self;
1486 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1487 error = 0;
1488 goto done;
1489 }
1490 if (!tryhard)
1491 goto done;
1492 if (ncpus == 0)
1493 ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
1494 if ((max_spinners = self->ul_max_spinners) >= ncpus)
1495 max_spinners = ncpus - 1;
1496 max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
1497 if (max_count == 0)
1498 goto done;
1499
1500 /*
1501 * This is a process-shared mutex.
1502 * We cannot know if the owner is running on a processor.
1503 * We just spin and hope that it is on a processor.
1504 */
1505 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
1506 goto done;
1507 DTRACE_PROBE1(plockstat, mutex__spin, mp);
1508 for (count = 1; ; count++) {
1509 #if defined(__sparc) && !defined(_LP64)
1510 /* horrible hack, necessary only on 32-bit sparc */
1511 if (fix_alignment_problem) {
1512 if ((*lockp & LOCKMASK64) == 0 &&
1513 set_lock_byte(&mp->mutex_lockw) == 0) {
1514 mp->mutex_ownerpid = udp->pid;
1515 mp->mutex_owner = (uintptr_t)self;
1516 error = 0;
1517 break;
1518 }
1519 } else
1520 #endif
1521 if ((*lockp & LOCKMASK64) == 0 &&
1522 set_lock_byte64(lockp, udp->pid) == 0) {
1523 mp->mutex_owner = (uintptr_t)self;
1524 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1525 error = 0;
1526 break;
1527 }
1528 if (count == max_count)
1529 break;
1530 SMT_PAUSE();
1531 }
1532 new_lockword = spinners_decr(&mp->mutex_lockword);
1533 if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
1534 /*
1535 * We haven't yet acquired the lock, the lock
1536 * is free, and there are no other spinners.
1537 * Make one final attempt to acquire the lock.
1538 *
1539 * This isn't strictly necessary since mutex_lock_kernel()
1540 * (the next action this thread will take if it doesn't
1541 * acquire the lock here) makes one attempt to acquire
1542 * the lock before putting the thread to sleep.
1543 *
1544 * If the next action for this thread (on failure here)
1545 * were not to call mutex_lock_kernel(), this would be
1546 * necessary for correctness, to avoid ending up with an
1547 * unheld mutex with waiters but no one to wake them up.
1548 */
1549 #if defined(__sparc) && !defined(_LP64)
1550 /* horrible hack, necessary only on 32-bit sparc */
1551 if (fix_alignment_problem) {
1552 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1553 mp->mutex_ownerpid = udp->pid;
1554 mp->mutex_owner = (uintptr_t)self;
1555 error = 0;
1556 }
1557 } else
1558 #endif
1559 if (set_lock_byte64(lockp, udp->pid) == 0) {
1560 mp->mutex_owner = (uintptr_t)self;
1561 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1562 error = 0;
1563 }
1564 count++;
1565 }
1566
1567 done:
1568 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1569 ASSERT(mp->mutex_type & LOCK_ROBUST);
1570 /*
1571 * We shouldn't own the mutex.
1572 * Just clear the lock; everyone has already been waked up.
1573 */
1574 mp->mutex_owner = 0;
1575 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1576 (void) clear_lockbyte64(&mp->mutex_lockword64);
1577 error = ENOTRECOVERABLE;
1578 }
1579
1580 exit_critical(self);
1581
1582 if (error) {
1583 if (count) {
1584 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
1585 }
1586 if (error != EBUSY) {
1587 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1588 }
1589 } else {
1590 if (count) {
1591 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
1592 }
1593 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
1594 if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
1595 ASSERT(mp->mutex_type & LOCK_ROBUST);
1596 if (mp->mutex_flag & LOCK_OWNERDEAD)
1597 error = EOWNERDEAD;
1598 else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
1599 error = ELOCKUNMAPPED;
1600 else
1601 error = EOWNERDEAD;
1602 }
1603 }
1604
1605 return (error);
1606 }
1607
1608 /*
1609 * Mutex wakeup code for releasing a USYNC_THREAD mutex.
1610 * Returns the lwpid of the thread that was dequeued, if any.
1611 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
1612 * to wake up the specified lwp.
1613 */
1614 static lwpid_t
1615 mutex_wakeup(mutex_t *mp)
1616 {
1617 lwpid_t lwpid = 0;
1618 int more;
1619 queue_head_t *qp;
1620 ulwp_t *ulwp;
1621
1622 /*
1623 * Dequeue a waiter from the sleep queue. Don't touch the mutex
1624 * waiters bit if no one was found on the queue because the mutex
1625 * might have been deallocated or reallocated for another purpose.
1626 */
1627 qp = queue_lock(mp, MX);
1628 if ((ulwp = dequeue(qp, &more)) != NULL) {
1629 lwpid = ulwp->ul_lwpid;
1630 mp->mutex_waiters = more;
1631 }
1632 queue_unlock(qp);
1633 return (lwpid);
1634 }
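
/*
 * Illustrative caller pattern for mutex_wakeup() (a sketch, not part of
 * this file): as the comment above says, the wakeup itself is deferred
 * to the caller so it can be issued after other locks are dropped:
 *
 *	lwpid_t lwpid = mutex_wakeup(mp);
 *	if (lwpid != 0)
 *		(void) __lwp_unpark(lwpid);
 */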
1635
1636 /*
1637 * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
1638 */
1639 static void
1640 mutex_wakeup_all(mutex_t *mp)
1641 {
1642 queue_head_t *qp;
1643 queue_root_t *qrp;
1644 int nlwpid = 0;
1645 int maxlwps = MAXLWPS;
1646 ulwp_t *ulwp;
1647 lwpid_t buffer[MAXLWPS];
1648 lwpid_t *lwpid = buffer;
1649
1650 /*
1651 * Walk the list of waiters and prepare to wake up all of them.
1652 * The waiters flag has already been cleared from the mutex.
1653 *
1654 * We keep track of lwpids that are to be unparked in lwpid[].
1655 * __lwp_unpark_all() is called to unpark all of them after
1656 * they have been removed from the sleep queue and the sleep
1657 * queue lock has been dropped. If we run out of space in our
1658 * on-stack buffer, we need to allocate more but we can't call
1659 * lmalloc() because we are holding a queue lock when the overflow
1660 * occurs and lmalloc() acquires a lock. We can't use alloca()
1661 * either because the application may have allocated a small
1662 * stack and we don't want to overrun the stack. So we call
1663 * alloc_lwpids() to allocate a bigger buffer using the mmap()
1664 * system call directly since that path acquires no locks.
1665 */
1666 qp = queue_lock(mp, MX);
1667 for (;;) {
1668 if ((qrp = qp->qh_root) == NULL ||
1669 (ulwp = qrp->qr_head) == NULL)
1670 break;
1671 ASSERT(ulwp->ul_wchan == mp);
1672 queue_unlink(qp, &qrp->qr_head, NULL);
1673 ulwp->ul_sleepq = NULL;
1674 ulwp->ul_wchan = NULL;
1675 if (nlwpid == maxlwps)
1676 lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
1677 lwpid[nlwpid++] = ulwp->ul_lwpid;
1678 }
1679
1680 if (nlwpid == 0) {
1681 queue_unlock(qp);
1682 } else {
1683 mp->mutex_waiters = 0;
1684 no_preempt(curthread);
1685 queue_unlock(qp);
1686 if (nlwpid == 1)
1687 (void) __lwp_unpark(lwpid[0]);
1688 else
1689 (void) __lwp_unpark_all(lwpid, nlwpid);
1690 preempt(curthread);
1691 }
1692
1693 if (lwpid != buffer)
1694 (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
1695 }
1696
1697 /*
1698 * Release a process-private mutex.
1699 * As an optimization, if there are waiters but there are also spinners
1700 * attempting to acquire the mutex, then don't bother waking up a waiter;
1701 * one of the spinners will acquire the mutex soon and it would be a waste
1702 * of resources to wake up some thread just to have it spin for a while
1703 * and then possibly go back to sleep. See mutex_trylock_adaptive().
1704 */
1705 static lwpid_t
1706 mutex_unlock_queue(mutex_t *mp, int release_all)
1707 {
1708 ulwp_t *self = curthread;
1709 lwpid_t lwpid = 0;
1710 uint32_t old_lockword;
1711
1712 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
1713 sigoff(self);
1714 mp->mutex_owner = 0;
1715 old_lockword = clear_lockbyte(&mp->mutex_lockword);
1716 if ((old_lockword & WAITERMASK) &&
1717 (release_all || (old_lockword & SPINNERMASK) == 0)) {
1718 no_preempt(self); /* ensure a prompt wakeup */
1719 if (release_all)
1720 mutex_wakeup_all(mp);
1721 else
1722 lwpid = mutex_wakeup(mp);
1723 if (lwpid == 0)
1724 preempt(self);
1725 }
1726 sigon(self);
1727 return (lwpid);
1728 }
1729
1730 /*
1731 * Like mutex_unlock_queue(), but for process-shared mutexes.
1732 */
1733 static void
1734 mutex_unlock_process(mutex_t *mp, int release_all)
1735 {
1736 ulwp_t *self = curthread;
1737 uint64_t old_lockword64;
1738
1739 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
1740 sigoff(self);
1741 mp->mutex_owner = 0;
1742 #if defined(__sparc) && !defined(_LP64)
1743 /* horrible hack, necessary only on 32-bit sparc */
1744 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
1745 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) {
1746 uint32_t old_lockword;
1747 mp->mutex_ownerpid = 0;
1748 old_lockword = clear_lockbyte(&mp->mutex_lockword);
1749 if ((old_lockword & WAITERMASK) &&
1750 (release_all || (old_lockword & SPINNERMASK) == 0)) {
1751 no_preempt(self); /* ensure a prompt wakeup */
1752 (void) ___lwp_mutex_wakeup(mp, release_all);
1753 preempt(self);
1754 }
1755 sigon(self);
1756 return;
1757 }
1758 #endif
1759 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1760 old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64);
1761 if ((old_lockword64 & WAITERMASK64) &&
1762 (release_all || (old_lockword64 & SPINNERMASK64) == 0)) {
1763 no_preempt(self); /* ensure a prompt wakeup */
1764 (void) ___lwp_mutex_wakeup(mp, release_all);
1765 preempt(self);
1766 }
1767 sigon(self);
1768 }
1769
1770 void
1771 stall(void)
1772 {
1773 for (;;)
1774 (void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
1775 }
1776
1777 /*
1778 * Acquire a USYNC_THREAD mutex via user-level sleep queues.
1779 * We failed set_lock_byte(&mp->mutex_lockw) before coming here.
1780 * If successful, returns with mutex_owner set correctly.
1781 */
1782 int
1783 mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
1784 timespec_t *tsp)
1785 {
1786 uberdata_t *udp = curthread->ul_uberdata;
1787 queue_head_t *qp;
1788 hrtime_t begin_sleep;
1789 int error = 0;
1790
1791 self->ul_sp = stkptr();
1792 if (__td_event_report(self, TD_SLEEP, udp)) {
1793 self->ul_wchan = mp;
1794 self->ul_td_evbuf.eventnum = TD_SLEEP;
1795 self->ul_td_evbuf.eventdata = mp;
1796 tdb_event(TD_SLEEP, udp);
1797 }
1798 if (msp) {
1799 tdb_incr(msp->mutex_sleep);
1800 begin_sleep = gethrtime();
1801 }
1802
1803 DTRACE_PROBE1(plockstat, mutex__block, mp);
1804
1805 /*
1806 * Put ourself on the sleep queue, and while we are
1807 * unable to grab the lock, go park in the kernel.
1808 * Take ourself off the sleep queue after we acquire the lock.
1809 * The waiter bit can be set/cleared only while holding the queue lock.
1810 */
1811 qp = queue_lock(mp, MX);
1812 enqueue(qp, self, 0);
1813 mp->mutex_waiters = 1;
1814 for (;;) {
1815 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1816 mp->mutex_owner = (uintptr_t)self;
1817 mp->mutex_waiters = dequeue_self(qp);
1818 break;
1819 }
1820 set_parking_flag(self, 1);
1821 queue_unlock(qp);
1822 /*
1823 * __lwp_park() will return the residual time in tsp
1824 * if we are unparked before the timeout expires.
1825 */
1826 error = __lwp_park(tsp, 0);
1827 set_parking_flag(self, 0);
1828 /*
1829 * We could have taken a signal or suspended ourself.
1830 * If we did, then we removed ourself from the queue.
1831 * Someone else may have removed us from the queue
1832 * as a consequence of mutex_unlock(). We may have
1833 * gotten a timeout from __lwp_park(). Or we may still
1834 * be on the queue and this is just a spurious wakeup.
1835 */
1836 qp = queue_lock(mp, MX);
1837 if (self->ul_sleepq == NULL) {
1838 if (error) {
1839 mp->mutex_waiters = queue_waiter(qp)? 1 : 0;
1840 if (error != EINTR)
1841 break;
1842 error = 0;
1843 }
1844 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1845 mp->mutex_owner = (uintptr_t)self;
1846 break;
1847 }
1848 enqueue(qp, self, 0);
1849 mp->mutex_waiters = 1;
1850 }
1851 ASSERT(self->ul_sleepq == qp &&
1852 self->ul_qtype == MX &&
1853 self->ul_wchan == mp);
1854 if (error) {
1855 if (error != EINTR) {
1856 mp->mutex_waiters = dequeue_self(qp);
1857 break;
1858 }
1859 error = 0;
1860 }
1861 }
1862 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
1863 self->ul_wchan == NULL);
1864 self->ul_sp = 0;
1865
1866 ASSERT(error == 0 || error == EINVAL || error == ETIME);
1867
1868 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1869 ASSERT(mp->mutex_type & LOCK_ROBUST);
1870 /*
1871 * We shouldn't own the mutex.
1872 * Just clear the lock; everyone has already been waked up.
1873 */
1874 mp->mutex_owner = 0;
1875 (void) clear_lockbyte(&mp->mutex_lockword);
1876 error = ENOTRECOVERABLE;
1877 }
1878
1879 queue_unlock(qp);
1880
1881 if (msp)
1882 msp->mutex_sleep_time += gethrtime() - begin_sleep;
1883
1884 if (error) {
1885 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
1886 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1887 } else {
1888 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
1889 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1890 if (mp->mutex_flag & LOCK_OWNERDEAD) {
1891 ASSERT(mp->mutex_type & LOCK_ROBUST);
1892 error = EOWNERDEAD;
1893 }
1894 }
1895
1896 return (error);
1897 }
1898
1899 static int
1900 mutex_recursion(mutex_t *mp, int mtype, int try)
1901 {
1902 ASSERT(mutex_held(mp));
1903 ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK));
1904 ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
1905
1906 if (mtype & LOCK_RECURSIVE) {
1907 if (mp->mutex_rcount == RECURSION_MAX) {
1908 DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN);
1909 return (EAGAIN);
1910 }
1911 mp->mutex_rcount++;
1912 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
1913 return (0);
1914 }
1915 if (try == MUTEX_LOCK) {
1916 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
1917 return (EDEADLK);
1918 }
1919 return (EBUSY);
1920 }
1921
1922 /*
1923 * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
1924 * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
1925 * We use tdb_hash_lock here and in the synch object tracking code in
1926 * the tdb_agent.c file. There is no conflict between these two usages.
1927 */
1928 void
1929 register_lock(mutex_t *mp)
1930 {
1931 uberdata_t *udp = curthread->ul_uberdata;
1932 uint_t hash = LOCK_HASH(mp);
1933 robust_t *rlp;
1934 robust_t *invalid;
1935 robust_t **rlpp;
1936 robust_t **table;
1937
1938 if ((table = udp->robustlocks) == NULL) {
1939 lmutex_lock(&udp->tdb_hash_lock);
1940 if ((table = udp->robustlocks) == NULL) {
1941 table = lmalloc(LOCKHASHSZ * sizeof (robust_t *));
1942 membar_producer();
1943 udp->robustlocks = table;
1944 }
1945 lmutex_unlock(&udp->tdb_hash_lock);
1946 }
1947 membar_consumer();
1948
1949 /*
1950 * First search the registered table with no locks held.
1951 * This is safe because the table never shrinks
1952 * and we can only get a false negative.
1953 */
1954 for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) {
1955 if (rlp->robust_lock == mp) /* already registered */
1956 return;
1957 }
1958
1959 /*
1960 * The lock was not found.
1961 * Repeat the operation with tdb_hash_lock held.
1962 */
1963 lmutex_lock(&udp->tdb_hash_lock);
1964
1965 invalid = NULL;
1966 for (rlpp = &table[hash];
1967 (rlp = *rlpp) != NULL;
1968 rlpp = &rlp->robust_next) {
1969 if (rlp->robust_lock == mp) { /* already registered */
1970 lmutex_unlock(&udp->tdb_hash_lock);
1971 return;
1972 }
1973 /* remember the first invalid entry, if any */
1974 if (rlp->robust_lock == INVALID_ADDR && invalid == NULL)
1975 invalid = rlp;
1976 }
1977
1978 /*
1979 * The lock has never been registered.
1980 * Add it to the table and register it now.
1981 */
1982 if ((rlp = invalid) != NULL) {
1983 /*
1984 * Reuse the invalid entry we found above.
1985 * The linkages are still correct.
1986 */
1987 rlp->robust_lock = mp;
1988 membar_producer();
1989 } else {
1990 /*
1991 * Allocate a new entry and add it to
1992 * the hash table and to the global list.
1993 */
1994 rlp = lmalloc(sizeof (*rlp));
1995 rlp->robust_lock = mp;
1996 rlp->robust_next = NULL;
1997 rlp->robust_list = udp->robustlist;
1998 udp->robustlist = rlp;
1999 membar_producer();
2000 *rlpp = rlp;
2001 }
2002
2003 lmutex_unlock(&udp->tdb_hash_lock);
2004
2005 (void) ___lwp_mutex_register(mp, &rlp->robust_lock);
2006 }
2007
2008 /*
2009 * This is called in the child of fork()/forkall() to start over
2010 * with a clean slate. (Each process must register its own locks.)
2011 * No locks are needed because all other threads are suspended or gone.
2012 */
2013 void
2014 unregister_locks(void)
2015 {
2016 uberdata_t *udp = curthread->ul_uberdata;
2017 robust_t **table;
2018 robust_t *rlp;
2019 robust_t *next;
2020
2021 /*
2022 * Do this first, before calling lfree().
2023 */
2024 table = udp->robustlocks;
2025 udp->robustlocks = NULL;
2026 rlp = udp->robustlist;
2027 udp->robustlist = NULL;
2028
2029 /*
2030 * Do this by traversing the global list, not the hash table.
2031 */
2032 while (rlp != NULL) {
2033 next = rlp->robust_list;
2034 lfree(rlp, sizeof (*rlp));
2035 rlp = next;
2036 }
2037 if (table != NULL)
2038 lfree(table, LOCKHASHSZ * sizeof (robust_t *));
2039 }
2040
2041 /*
2042 * Returns with mutex_owner set correctly.
2043 */
2044 int
2045 mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
2046 {
2047 ulwp_t *self = curthread;
2048 uberdata_t *udp = self->ul_uberdata;
2049 int mtype = mp->mutex_type;
2050 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2051 int error = 0;
2052 int noceil = try & MUTEX_NOCEIL;
2053 uint8_t ceil;
2054 int myprio;
2055
2056 try &= ~MUTEX_NOCEIL;
2057 ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
2058
2059 if (!self->ul_schedctl_called)
2060 (void) setup_schedctl();
2061
2062 if (msp && try == MUTEX_TRY)
2063 tdb_incr(msp->mutex_try);
2064
2065 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp))
2066 return (mutex_recursion(mp, mtype, try));
2067
2068 if (self->ul_error_detection && try == MUTEX_LOCK &&
2069 tsp == NULL && mutex_held(mp))
2070 lock_error(mp, "mutex_lock", NULL, NULL);
2071
2072 if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
2073 update_sched(self);
2074 if (self->ul_cid != self->ul_rtclassid) {
2075 DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM);
2076 return (EPERM);
2077 }
2078 ceil = mp->mutex_ceiling;
2079 myprio = self->ul_epri? self->ul_epri : self->ul_pri;
2080 if (myprio > ceil) {
2081 DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL);
2082 return (EINVAL);
2083 }
2084 if ((error = _ceil_mylist_add(mp)) != 0) {
2085 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
2086 return (error);
2087 }
2088 if (myprio < ceil)
2089 _ceil_prio_inherit(ceil);
2090 }
2091
2092 if ((mtype & (USYNC_PROCESS | LOCK_ROBUST))
2093 == (USYNC_PROCESS | LOCK_ROBUST))
2094 register_lock(mp);
2095
2096 if (mtype & LOCK_PRIO_INHERIT) {
2097 /* go straight to the kernel */
2098 if (try == MUTEX_TRY)
2099 error = mutex_trylock_kernel(mp);
2100 else /* MUTEX_LOCK */
2101 error = mutex_lock_kernel(mp, tsp, msp);
2102 /*
2103 * The kernel never sets or clears the lock byte
2104 * for LOCK_PRIO_INHERIT mutexes.
2105 * Set it here for consistency.
2106 */
2107 switch (error) {
2108 case 0:
2109 self->ul_pilocks++;
2110 mp->mutex_lockw = LOCKSET;
2111 break;
2112 case EOWNERDEAD:
2113 case ELOCKUNMAPPED:
2114 self->ul_pilocks++;
2115 mp->mutex_lockw = LOCKSET;
2116 /* FALLTHROUGH */
2117 case ENOTRECOVERABLE:
2118 ASSERT(mtype & LOCK_ROBUST);
2119 break;
2120 case EDEADLK:
2121 if (try == MUTEX_TRY) {
2122 error = EBUSY;
2123 } else if (tsp != NULL) { /* simulate a timeout */
2124 /*
2125 * Note: mutex_timedlock() never returns EINTR.
2126 */
2127 timespec_t ts = *tsp;
2128 timespec_t rts;
2129
2130 while (__nanosleep(&ts, &rts) == EINTR)
2131 ts = rts;
2132 error = ETIME;
2133 } else { /* simulate a deadlock */
2134 stall();
2135 }
2136 break;
2137 }
2138 } else if (mtype & USYNC_PROCESS) {
2139 error = mutex_trylock_process(mp, try == MUTEX_LOCK);
2140 if (error == EBUSY && try == MUTEX_LOCK)
2141 error = mutex_lock_kernel(mp, tsp, msp);
2142 } else { /* USYNC_THREAD */
2143 error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK);
2144 if (error == EBUSY && try == MUTEX_LOCK)
2145 error = mutex_lock_queue(self, msp, mp, tsp);
2146 }
2147
2148 switch (error) {
2149 case 0:
2150 case EOWNERDEAD:
2151 case ELOCKUNMAPPED:
2152 if (mtype & LOCK_ROBUST)
2153 remember_lock(mp);
2154 if (msp)
2155 record_begin_hold(msp);
2156 break;
2157 default:
2158 if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
2159 (void) _ceil_mylist_del(mp);
2160 if (myprio < ceil)
2161 _ceil_prio_waive();
2162 }
2163 if (try == MUTEX_TRY) {
2164 if (msp)
2165 tdb_incr(msp->mutex_try_fail);
2166 if (__td_event_report(self, TD_LOCK_TRY, udp)) {
2167 self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
2168 tdb_event(TD_LOCK_TRY, udp);
2169 }
2170 }
2171 break;
2172 }
2173
2174 return (error);
2175 }
2176
2177 int
2178 fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
2179 {
2180 ulwp_t *self = curthread;
2181 uberdata_t *udp = self->ul_uberdata;
2182
2183 /*
2184 * We know that USYNC_PROCESS is set in mtype and that
2185 * zero, one, or both of the flags LOCK_RECURSIVE and
2186 * LOCK_ERRORCHECK are set, and that no other flags are set.
2187 */
2188 ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0);
2189 enter_critical(self);
2190 #if defined(__sparc) && !defined(_LP64)
2191 /* horrible hack, necessary only on 32-bit sparc */
2192 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2193 self->ul_misaligned) {
2194 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2195 mp->mutex_ownerpid = udp->pid;
2196 mp->mutex_owner = (uintptr_t)self;
2197 exit_critical(self);
2198 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2199 return (0);
2200 }
2201 } else
2202 #endif
2203 if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
2204 mp->mutex_owner = (uintptr_t)self;
2205 /* mp->mutex_ownerpid was set by set_lock_byte64() */
2206 exit_critical(self);
2207 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2208 return (0);
2209 }
2210 exit_critical(self);
2211
2212 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
2213 return (mutex_recursion(mp, mtype, try));
2214
2215 if (try == MUTEX_LOCK) {
2216 if (mutex_trylock_process(mp, 1) == 0)
2217 return (0);
2218 return (mutex_lock_kernel(mp, tsp, NULL));
2219 }
2220
2221 if (__td_event_report(self, TD_LOCK_TRY, udp)) {
2222 self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
2223 tdb_event(TD_LOCK_TRY, udp);
2224 }
2225 return (EBUSY);
2226 }
2227
2228 static int
2229 mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
2230 {
2231 ulwp_t *self = curthread;
2232 int mtype = mp->mutex_type;
2233 uberflags_t *gflags;
2234
2235 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2236 self->ul_error_detection && self->ul_misaligned == 0)
2237 lock_error(mp, "mutex_lock", NULL, "mutex is misaligned");
2238
2239 /*
2240 * Optimize the case of USYNC_THREAD, including
2241 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
2242 * no error detection, no lock statistics,
2243 * and the process has only a single thread.
2244 * (Most likely a traditional single-threaded application.)
2245 */
2246 if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
2247 self->ul_uberdata->uberflags.uf_all) == 0) {
2248 /*
2249 * Only one thread exists so we don't need an atomic operation.
2250 * We do, however, need to protect against signals.
2251 */
2252 if (mp->mutex_lockw == 0) {
2253 sigoff(self);
2254 mp->mutex_lockw = LOCKSET;
2255 mp->mutex_owner = (uintptr_t)self;
2256 sigon(self);
2257 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2258 return (0);
2259 }
2260 if (mtype && MUTEX_OWNER(mp) == self)
2261 return (mutex_recursion(mp, mtype, MUTEX_LOCK));
2262 /*
2263 * We have reached a deadlock, probably because the
2264 * process is executing non-async-signal-safe code in
2265 * a signal handler and is attempting to acquire a lock
2266 * that it already owns. This is not surprising, given
2267 	 * bad programming practices over the years that have
2268 * resulted in applications calling printf() and such
2269 * in their signal handlers. Unless the user has told
2270 * us that the signal handlers are safe by setting:
2271 * export _THREAD_ASYNC_SAFE=1
2272 * we return EDEADLK rather than actually deadlocking.
2273 *
2274 * A lock may explicitly override this with the
2275 * LOCK_DEADLOCK flag which is currently set for POSIX
2276 * NORMAL mutexes as the specification requires deadlock
2277 * behavior and applications _do_ rely on that for their
2278 * correctness guarantees.
2279 */
2280 if (tsp == NULL &&
2281 MUTEX_OWNER(mp) == self && !self->ul_async_safe &&
2282 (mp->mutex_flag & LOCK_DEADLOCK) == 0) {
2283 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
2284 return (EDEADLK);
2285 }
2286 }
2287
2288 /*
2289 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
2290 * no error detection, and no lock statistics.
2291 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
2292 */
2293 if ((gflags = self->ul_schedctl_called) != NULL &&
2294 (gflags->uf_trs_ted |
2295 (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
2296 if (mtype & USYNC_PROCESS)
2297 return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
2298 sigoff(self);
2299 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2300 mp->mutex_owner = (uintptr_t)self;
2301 sigon(self);
2302 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2303 return (0);
2304 }
2305 sigon(self);
2306 if (mtype && MUTEX_OWNER(mp) == self)
2307 return (mutex_recursion(mp, mtype, MUTEX_LOCK));
2308 if (mutex_trylock_adaptive(mp, 1) != 0)
2309 return (mutex_lock_queue(self, NULL, mp, tsp));
2310 return (0);
2311 }
2312
2313 /* else do it the long way */
2314 return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
2315 }
2316
2317 #pragma weak pthread_mutex_lock = mutex_lock
2318 #pragma weak _mutex_lock = mutex_lock
2319 int
2320 mutex_lock(mutex_t *mp)
2321 {
2322 ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
2323 return (mutex_lock_impl(mp, NULL));
2324 }
2325
2326 #pragma weak pthread_mutex_enter_np = mutex_enter
2327 void
2328 mutex_enter(mutex_t *mp)
2329 {
2330 int ret;
2331 int attr = mp->mutex_type & ALL_ATTRIBUTES;
2332
2333 /*
2334 * Require LOCK_ERRORCHECK, accept LOCK_RECURSIVE.
2335 */
2336 if (attr != LOCK_ERRORCHECK &&
2337 attr != (LOCK_ERRORCHECK | LOCK_RECURSIVE)) {
2338 mutex_panic(mp, "mutex_enter: bad mutex type");
2339 }
2340 ret = mutex_lock(mp);
2341 if (ret == EDEADLK) {
2342 mutex_panic(mp, "recursive mutex_enter");
2343 } else if (ret == EAGAIN) {
2344 mutex_panic(mp, "excessive recursive mutex_enter");
2345 } else if (ret != 0) {
2346 mutex_panic(mp, "unknown mutex_enter failure");
2347 }
2348 }
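
/*
 * Illustrative sketch (not part of this file): mutex_enter() is intended
 * for error-checking mutexes only, so an application using the _np entry
 * points would initialize the mutex with PTHREAD_MUTEX_ERRORCHECK first.
 * The variable names below are hypothetical.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	(void) pthread_mutex_init(&m, &attr);
 *	(void) pthread_mutexattr_destroy(&attr);
 *
 *	pthread_mutex_enter_np(&m);
 *	... critical section; a recursive enter would panic ...
 *	pthread_mutex_exit_np(&m);
 */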
2349
2350 int
2351 pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp,
2352 const struct timespec *_RESTRICT_KYWD abstime)
2353 {
2354 timespec_t tslocal;
2355 int error;
2356
2357 ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
2358 abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
2359 error = mutex_lock_impl((mutex_t *)mp, &tslocal);
2360 if (error == ETIME)
2361 error = ETIMEDOUT;
2362 return (error);
2363 }
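
/*
 * Illustrative sketch (not part of this file): a caller supplies an
 * absolute CLOCK_REALTIME deadline to pthread_mutex_timedlock().
 * The mutex name and the five-second bound are arbitrary.
 *
 *	struct timespec deadline;
 *	int err;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	err = pthread_mutex_timedlock(&m, &deadline);
 *	if (err == ETIMEDOUT)
 *		... the lock could not be acquired within five seconds ...
 */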
2364
2365 int
2366 pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp,
2367 const struct timespec *_RESTRICT_KYWD reltime)
2368 {
2369 timespec_t tslocal;
2370 int error;
2371
2372 ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
2373 tslocal = *reltime;
2374 error = mutex_lock_impl((mutex_t *)mp, &tslocal);
2375 if (error == ETIME)
2376 error = ETIMEDOUT;
2377 return (error);
2378 }
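
/*
 * Illustrative sketch (not part of this file): the _np variant takes a
 * relative timeout instead of an absolute deadline, which spares the
 * caller a clock_gettime() call.  Names below are hypothetical.
 *
 *	struct timespec wait = { 0, 500000000 };	(half a second)
 *	int err = pthread_mutex_reltimedlock_np(&m, &wait);
 *	if (err == ETIMEDOUT)
 *		... still contended after half a second ...
 */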
2379
2380 #pragma weak pthread_mutex_trylock = mutex_trylock
2381 int
2382 mutex_trylock(mutex_t *mp)
2383 {
2384 ulwp_t *self = curthread;
2385 uberdata_t *udp = self->ul_uberdata;
2386 int mtype = mp->mutex_type;
2387 uberflags_t *gflags;
2388
2389 ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
2390
2391 /*
2392 * Optimize the case of USYNC_THREAD, including
2393 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
2394 * no error detection, no lock statistics,
2395 * and the process has only a single thread.
2396 * (Most likely a traditional single-threaded application.)
2397 */
2398 if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
2399 udp->uberflags.uf_all) == 0) {
2400 /*
2401 * Only one thread exists so we don't need an atomic operation.
2402 * We do, however, need to protect against signals.
2403 */
2404 if (mp->mutex_lockw == 0) {
2405 sigoff(self);
2406 mp->mutex_lockw = LOCKSET;
2407 mp->mutex_owner = (uintptr_t)self;
2408 sigon(self);
2409 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2410 return (0);
2411 }
2412 if (mtype && MUTEX_OWNER(mp) == self)
2413 return (mutex_recursion(mp, mtype, MUTEX_TRY));
2414 return (EBUSY);
2415 }
2416
2417 /*
2418 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
2419 * no error detection, and no lock statistics.
2420 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
2421 */
2422 if ((gflags = self->ul_schedctl_called) != NULL &&
2423 (gflags->uf_trs_ted |
2424 (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
2425 if (mtype & USYNC_PROCESS)
2426 return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
2427 sigoff(self);
2428 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2429 mp->mutex_owner = (uintptr_t)self;
2430 sigon(self);
2431 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2432 return (0);
2433 }
2434 sigon(self);
2435 if (mtype && MUTEX_OWNER(mp) == self)
2436 return (mutex_recursion(mp, mtype, MUTEX_TRY));
2437 if (__td_event_report(self, TD_LOCK_TRY, udp)) {
2438 self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
2439 tdb_event(TD_LOCK_TRY, udp);
2440 }
2441 return (EBUSY);
2442 }
2443
2444 /* else do it the long way */
2445 return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
2446 }
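
/*
 * Illustrative sketch (not part of this file): a typical trylock pattern
 * does useful work while the lock is contended rather than blocking
 * immediately.  do_other_work() is a hypothetical application function.
 *
 *	if (pthread_mutex_trylock(&m) == EBUSY) {
 *		do_other_work();
 *		(void) pthread_mutex_lock(&m);
 *	}
 *	... critical section ...
 *	(void) pthread_mutex_unlock(&m);
 */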
2447
2448 int
2449 mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
2450 {
2451 ulwp_t *self = curthread;
2452 uberdata_t *udp = self->ul_uberdata;
2453 int mtype = mp->mutex_type;
2454 tdb_mutex_stats_t *msp;
2455 int error = 0;
2456 int release_all;
2457 lwpid_t lwpid;
2458
2459 if ((mtype & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
2460 !mutex_held(mp))
2461 return (EPERM);
2462
2463 if (self->ul_error_detection && !mutex_held(mp))
2464 lock_error(mp, "mutex_unlock", NULL, NULL);
2465
2466 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2467 mp->mutex_rcount--;
2468 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2469 return (0);
2470 }
2471
2472 if ((msp = MUTEX_STATS(mp, udp)) != NULL)
2473 (void) record_hold_time(msp);
2474
2475 if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) &&
2476 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
2477 ASSERT(mtype & LOCK_ROBUST);
2478 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
2479 mp->mutex_flag |= LOCK_NOTRECOVERABLE;
2480 }
2481 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
2482
2483 if (mtype & LOCK_PRIO_INHERIT) {
2484 no_preempt(self);
2485 mp->mutex_owner = 0;
2486 /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
2487 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2488 mp->mutex_lockw = LOCKCLEAR;
2489 self->ul_pilocks--;
2490 error = ___lwp_mutex_unlock(mp);
2491 preempt(self);
2492 } else if (mtype & USYNC_PROCESS) {
2493 mutex_unlock_process(mp, release_all);
2494 } else { /* USYNC_THREAD */
2495 if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
2496 (void) __lwp_unpark(lwpid);
2497 preempt(self);
2498 }
2499 }
2500
2501 if (mtype & LOCK_ROBUST)
2502 forget_lock(mp);
2503
2504 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
2505 _ceil_prio_waive();
2506
2507 return (error);
2508 }
2509
2510 #pragma weak pthread_mutex_unlock = mutex_unlock
2511 #pragma weak _mutex_unlock = mutex_unlock
2512 int
2513 mutex_unlock(mutex_t *mp)
2514 {
2515 ulwp_t *self = curthread;
2516 int mtype = mp->mutex_type;
2517 uberflags_t *gflags;
2518 lwpid_t lwpid;
2519 short el;
2520
2521 /*
2522 * Optimize the case of USYNC_THREAD, including
2523 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
2524 * no error detection, no lock statistics,
2525 * and the process has only a single thread.
2526 * (Most likely a traditional single-threaded application.)
2527 */
2528 if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
2529 self->ul_uberdata->uberflags.uf_all) == 0) {
2530 if (mtype) {
2531 /*
2532 * At this point we know that one or both of the
2533 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
2534 */
2535 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
2536 return (EPERM);
2537 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2538 mp->mutex_rcount--;
2539 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2540 return (0);
2541 }
2542 }
2543 /*
2544 * Only one thread exists so we don't need an atomic operation.
2545 * Also, there can be no waiters.
2546 */
2547 sigoff(self);
2548 mp->mutex_owner = 0;
2549 mp->mutex_lockword = 0;
2550 sigon(self);
2551 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2552 return (0);
2553 }
2554
2555 /*
2556 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
2557 * no error detection, and no lock statistics.
2558 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
2559 */
2560 if ((gflags = self->ul_schedctl_called) != NULL) {
2561 if (((el = gflags->uf_trs_ted) | mtype) == 0) {
2562 fast_unlock:
2563 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
2564 (void) __lwp_unpark(lwpid);
2565 preempt(self);
2566 }
2567 return (0);
2568 }
2569 if (el) /* error detection or lock statistics */
2570 goto slow_unlock;
2571 if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
2572 /*
2573 * At this point we know that one or both of the
2574 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
2575 */
2576 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
2577 return (EPERM);
2578 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2579 mp->mutex_rcount--;
2580 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2581 return (0);
2582 }
2583 goto fast_unlock;
2584 }
2585 if ((mtype &
2586 ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
2587 /*
2588 * At this point we know that zero, one, or both of the
2589 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
2590 * that the USYNC_PROCESS flag is set.
2591 */
2592 if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
2593 return (EPERM);
2594 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2595 mp->mutex_rcount--;
2596 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2597 return (0);
2598 }
2599 mutex_unlock_process(mp, 0);
2600 return (0);
2601 }
2602 }
2603
2604 /* else do it the long way */
2605 slow_unlock:
2606 return (mutex_unlock_internal(mp, 0));
2607 }
2608
2609 #pragma weak pthread_mutex_exit_np = mutex_exit
2610 void
2611 mutex_exit(mutex_t *mp)
2612 {
2613 int ret;
2614 int attr = mp->mutex_type & ALL_ATTRIBUTES;
2615
2616 if (attr != LOCK_ERRORCHECK &&
2617 attr != (LOCK_ERRORCHECK | LOCK_RECURSIVE)) {
2618 mutex_panic(mp, "mutex_exit: bad mutex type");
2619 }
2620 ret = mutex_unlock(mp);
2621 if (ret == EPERM) {
2622 mutex_panic(mp, "mutex_exit: not owner");
2623 } else if (ret != 0) {
2624 mutex_panic(mp, "unknown mutex_exit failure");
2625 }
2626
2627 }
2628
2629 /*
2630 * Internally to the library, almost all mutex lock/unlock actions
2631 * go through these lmutex_ functions, to protect critical regions.
2632 * We replicate a bit of code from mutex_lock() and mutex_unlock()
2633 * to make these functions faster since we know that the mutex type
2634 * of all internal locks is USYNC_THREAD. We also know that internal
2635 * locking can never fail, so we panic if it does.
2636 */
2637 void
2638 lmutex_lock(mutex_t *mp)
2639 {
2640 ulwp_t *self = curthread;
2641 uberdata_t *udp = self->ul_uberdata;
2642
2643 ASSERT(mp->mutex_type == USYNC_THREAD);
2644
2645 enter_critical(self);
2646 /*
2647 * Optimize the case of no lock statistics and only a single thread.
2648 * (Most likely a traditional single-threaded application.)
2649 */
2650 if (udp->uberflags.uf_all == 0) {
2651 /*
2652 * Only one thread exists; the mutex must be free.
2653 */
2654 ASSERT(mp->mutex_lockw == 0);
2655 mp->mutex_lockw = LOCKSET;
2656 mp->mutex_owner = (uintptr_t)self;
2657 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2658 } else {
2659 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2660
2661 if (!self->ul_schedctl_called)
2662 (void) setup_schedctl();
2663
2664 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2665 mp->mutex_owner = (uintptr_t)self;
2666 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2667 } else if (mutex_trylock_adaptive(mp, 1) != 0) {
2668 (void) mutex_lock_queue(self, msp, mp, NULL);
2669 }
2670
2671 if (msp)
2672 record_begin_hold(msp);
2673 }
2674 }
2675
2676 void
2677 lmutex_unlock(mutex_t *mp)
2678 {
2679 ulwp_t *self = curthread;
2680 uberdata_t *udp = self->ul_uberdata;
2681
2682 ASSERT(mp->mutex_type == USYNC_THREAD);
2683
2684 /*
2685 * Optimize the case of no lock statistics and only a single thread.
2686 * (Most likely a traditional single-threaded application.)
2687 */
2688 if (udp->uberflags.uf_all == 0) {
2689 /*
2690 * Only one thread exists so there can be no waiters.
2691 */
2692 mp->mutex_owner = 0;
2693 mp->mutex_lockword = 0;
2694 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2695 } else {
2696 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2697 lwpid_t lwpid;
2698
2699 if (msp)
2700 (void) record_hold_time(msp);
2701 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
2702 (void) __lwp_unpark(lwpid);
2703 preempt(self);
2704 }
2705 }
2706 exit_critical(self);
2707 }
2708
2709 /*
2710 * For specialized code in libc, like the asynchronous i/o code,
2711 * the following sig_*() locking primitives are used in order
2712 * to make the code asynchronous signal safe. Signals are
2713 * deferred while locks acquired by these functions are held.
2714 */
2715 void
2716 sig_mutex_lock(mutex_t *mp)
2717 {
2718 ulwp_t *self = curthread;
2719
2720 sigoff(self);
2721 (void) mutex_lock(mp);
2722 }
2723
2724 void
2725 sig_mutex_unlock(mutex_t *mp)
2726 {
2727 ulwp_t *self = curthread;
2728
2729 (void) mutex_unlock(mp);
2730 sigon(self);
2731 }
2732
2733 int
2734 sig_mutex_trylock(mutex_t *mp)
2735 {
2736 ulwp_t *self = curthread;
2737 int error;
2738
2739 sigoff(self);
2740 if ((error = mutex_trylock(mp)) != 0)
2741 sigon(self);
2742 return (error);
2743 }
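
/*
 * Illustrative sketch (not part of this file): within libc these
 * primitives are used as a matched pair so that signals stay deferred
 * for exactly the time the lock is held.  The lock name below is
 * hypothetical.
 *
 *	sig_mutex_lock(&aio_state_lock);
 *	... update state that a signal handler must never observe torn ...
 *	sig_mutex_unlock(&aio_state_lock);
 */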
2744
2745 /*
2746 * sig_cond_wait() is a cancellation point.
2747 */
2748 int
2749 sig_cond_wait(cond_t *cv, mutex_t *mp)
2750 {
2751 int error;
2752
2753 ASSERT(curthread->ul_sigdefer != 0);
2754 pthread_testcancel();
2755 error = __cond_wait(cv, mp);
2756 if (error == EINTR && curthread->ul_cursig) {
2757 sig_mutex_unlock(mp);
2758 /* take the deferred signal here */
2759 sig_mutex_lock(mp);
2760 }
2761 pthread_testcancel();
2762 return (error);
2763 }
2764
2765 /*
2766 * sig_cond_reltimedwait() is a cancellation point.
2767 */
2768 int
2769 sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
2770 {
2771 int error;
2772
2773 ASSERT(curthread->ul_sigdefer != 0);
2774 pthread_testcancel();
2775 error = __cond_reltimedwait(cv, mp, ts);
2776 if (error == EINTR && curthread->ul_cursig) {
2777 sig_mutex_unlock(mp);
2778 /* take the deferred signal here */
2779 sig_mutex_lock(mp);
2780 }
2781 pthread_testcancel();
2782 return (error);
2783 }
2784
2785 /*
2786  * For specialized code in libc, like the stdio code,
2787 * the following cancel_safe_*() locking primitives are used in
2788 * order to make the code cancellation-safe. Cancellation is
2789 * deferred while locks acquired by these functions are held.
2790 */
2791 void
2792 cancel_safe_mutex_lock(mutex_t *mp)
2793 {
2794 (void) mutex_lock(mp);
2795 curthread->ul_libc_locks++;
2796 }
2797
2798 int
2799 cancel_safe_mutex_trylock(mutex_t *mp)
2800 {
2801 int error;
2802
2803 if ((error = mutex_trylock(mp)) == 0)
2804 curthread->ul_libc_locks++;
2805 return (error);
2806 }
2807
2808 void
2809 cancel_safe_mutex_unlock(mutex_t *mp)
2810 {
2811 ulwp_t *self = curthread;
2812
2813 ASSERT(self->ul_libc_locks != 0);
2814
2815 (void) mutex_unlock(mp);
2816
2817 /*
2818 * Decrement the count of locks held by cancel_safe_mutex_lock().
2819 * If we are then in a position to terminate cleanly and
2820 * if there is a pending cancellation and cancellation
2821 * is not disabled and we received EINTR from a recent
2822 * system call then perform the cancellation action now.
2823 */
2824 if (--self->ul_libc_locks == 0 &&
2825 !(self->ul_vfork | self->ul_nocancel |
2826 self->ul_critical | self->ul_sigdefer) &&
2827 cancel_active())
2828 pthread_exit(PTHREAD_CANCELED);
2829 }
2830
2831 static int
2832 shared_mutex_held(mutex_t *mparg)
2833 {
2834 /*
2835 * The 'volatile' is necessary to make sure the compiler doesn't
2836 * reorder the tests of the various components of the mutex.
2837 * They must be tested in this order:
2838 * mutex_lockw
2839 * mutex_owner
2840 * mutex_ownerpid
2841 * This relies on the fact that everywhere mutex_lockw is cleared,
2842 * mutex_owner and mutex_ownerpid are cleared before mutex_lockw
2843 * is cleared, and that everywhere mutex_lockw is set, mutex_owner
2844 * and mutex_ownerpid are set after mutex_lockw is set, and that
2845 * mutex_lockw is set or cleared with a memory barrier.
2846 */
2847 volatile mutex_t *mp = (volatile mutex_t *)mparg;
2848 ulwp_t *self = curthread;
2849 uberdata_t *udp = self->ul_uberdata;
2850
2851 return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
2852 }
2853
2854 #pragma weak _mutex_held = mutex_held
2855 int
2856 mutex_held(mutex_t *mparg)
2857 {
2858 volatile mutex_t *mp = (volatile mutex_t *)mparg;
2859
2860 if (mparg->mutex_type & USYNC_PROCESS)
2861 return (shared_mutex_held(mparg));
2862 return (MUTEX_OWNED(mp, curthread));
2863 }
2864
2865 #pragma weak pthread_mutex_destroy = mutex_destroy
2866 #pragma weak _mutex_destroy = mutex_destroy
2867 int
2868 mutex_destroy(mutex_t *mp)
2869 {
2870 if (mp->mutex_type & USYNC_PROCESS)
2871 forget_lock(mp);
2872 (void) memset(mp, 0, sizeof (*mp));
2873 tdb_sync_obj_deregister(mp);
2874 return (0);
2875 }
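
/*
 * Illustrative sketch (not part of this file): the usual lifecycle of a
 * default (intra-process) POSIX mutex.  The mutex name is hypothetical.
 *
 *	pthread_mutex_t m;
 *
 *	(void) pthread_mutex_init(&m, NULL);
 *	(void) pthread_mutex_lock(&m);
 *	... critical section ...
 *	(void) pthread_mutex_unlock(&m);
 *	(void) pthread_mutex_destroy(&m);
 */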
2876
2877 #pragma weak pthread_mutex_consistent_np = mutex_consistent
2878 #pragma weak pthread_mutex_consistent = mutex_consistent
2879 int
2880 mutex_consistent(mutex_t *mp)
2881 {
2882 /*
2883 * Do this only for an inconsistent, initialized robust lock
2884 * that we hold. For all other cases, return EINVAL.
2885 */
2886 if (mutex_held(mp) &&
2887 (mp->mutex_type & LOCK_ROBUST) &&
2888 (mp->mutex_flag & LOCK_INITED) &&
2889 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
2890 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
2891 mp->mutex_rcount = 0;
2892 return (0);
2893 }
2894 return (EINVAL);
2895 }
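
/*
 * Illustrative sketch (not part of this file): how an application
 * recovers a robust mutex.  repair_shared_state() stands in for whatever
 * application-specific repair is needed; all names are hypothetical and
 * 'm' is assumed to have been initialized with PTHREAD_MUTEX_ROBUST.
 *
 *	int err = pthread_mutex_lock(&m);
 *
 *	if (err == EOWNERDEAD) {
 *		repair_shared_state();
 *		(void) pthread_mutex_consistent(&m);
 *		err = 0;
 *	}
 *	if (err == 0) {
 *		... critical section ...
 *		(void) pthread_mutex_unlock(&m);
 *	} else if (err == ENOTRECOVERABLE) {
 *		... the protected state must be reinitialized ...
 *	}
 */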
2896
2897 /*
2898 * Spin locks are separate from ordinary mutexes,
2899 * but we use the same data structure for them.
2900 */
2901
2902 int
2903 pthread_spin_init(pthread_spinlock_t *lock, int pshared)
2904 {
2905 mutex_t *mp = (mutex_t *)lock;
2906
2907 (void) memset(mp, 0, sizeof (*mp));
2908 if (pshared == PTHREAD_PROCESS_SHARED)
2909 mp->mutex_type = USYNC_PROCESS;
2910 else
2911 mp->mutex_type = USYNC_THREAD;
2912 mp->mutex_flag = LOCK_INITED;
2913 mp->mutex_magic = MUTEX_MAGIC;
2914
2915 /*
2916 * This should be at the beginning of the function,
2917 * but for the sake of old broken applications that
2918 * do not have proper alignment for their mutexes
2919 * (and don't check the return code from pthread_spin_init),
2920 * we put it here, after initializing the mutex regardless.
2921 */
2922 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2923 curthread->ul_misaligned == 0)
2924 return (EINVAL);
2925
2926 return (0);
2927 }
2928
2929 int
2930 pthread_spin_destroy(pthread_spinlock_t *lock)
2931 {
2932 (void) memset(lock, 0, sizeof (*lock));
2933 return (0);
2934 }
2935
2936 int
2937 pthread_spin_trylock(pthread_spinlock_t *lock)
2938 {
2939 mutex_t *mp = (mutex_t *)lock;
2940 ulwp_t *self = curthread;
2941 int error = 0;
2942
2943 no_preempt(self);
2944 if (set_lock_byte(&mp->mutex_lockw) != 0)
2945 error = EBUSY;
2946 else {
2947 mp->mutex_owner = (uintptr_t)self;
2948 if (mp->mutex_type == USYNC_PROCESS)
2949 mp->mutex_ownerpid = self->ul_uberdata->pid;
2950 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2951 }
2952 preempt(self);
2953 return (error);
2954 }
2955
2956 int
2957 pthread_spin_lock(pthread_spinlock_t *lock)
2958 {
2959 mutex_t *mp = (mutex_t *)lock;
2960 ulwp_t *self = curthread;
2961 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
2962 int count = 0;
2963
2964 ASSERT(!self->ul_critical || self->ul_bindflags);
2965
2966 DTRACE_PROBE1(plockstat, mutex__spin, mp);
2967
2968 /*
2969 * We don't care whether the owner is running on a processor.
2970 * We just spin because that's what this interface requires.
2971 */
2972 for (;;) {
2973 if (*lockp == 0) { /* lock byte appears to be clear */
2974 no_preempt(self);
2975 if (set_lock_byte(lockp) == 0)
2976 break;
2977 preempt(self);
2978 }
2979 if (count < INT_MAX)
2980 count++;
2981 SMT_PAUSE();
2982 }
2983 mp->mutex_owner = (uintptr_t)self;
2984 if (mp->mutex_type == USYNC_PROCESS)
2985 mp->mutex_ownerpid = self->ul_uberdata->pid;
2986 preempt(self);
2987 if (count) {
2988 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
2989 }
2990 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
2991 return (0);
2992 }
2993
2994 int
2995 pthread_spin_unlock(pthread_spinlock_t *lock)
2996 {
2997 mutex_t *mp = (mutex_t *)lock;
2998 ulwp_t *self = curthread;
2999
3000 no_preempt(self);
3001 mp->mutex_owner = 0;
3002 mp->mutex_ownerpid = 0;
3003 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
3004 (void) atomic_swap_32(&mp->mutex_lockword, 0);
3005 preempt(self);
3006 return (0);
3007 }
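
/*
 * Illustrative sketch (not part of this file): the full spin lock
 * lifecycle.  Spin locks are appropriate only for very short critical
 * sections, since waiters burn CPU instead of sleeping.
 *
 *	pthread_spinlock_t sl;
 *
 *	(void) pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE);
 *	(void) pthread_spin_lock(&sl);
 *	... a few instructions at most ...
 *	(void) pthread_spin_unlock(&sl);
 *	(void) pthread_spin_destroy(&sl);
 */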
3008
3009 #define INITIAL_LOCKS 8 /* initial size of ul_heldlocks.array */
3010
3011 /*
3012 * Find/allocate an entry for 'lock' in our array of held locks.
3013 */
3014 static mutex_t **
3015 find_lock_entry(mutex_t *lock)
3016 {
3017 ulwp_t *self = curthread;
3018 mutex_t **remembered = NULL;
3019 mutex_t **lockptr;
3020 uint_t nlocks;
3021
3022 if ((nlocks = self->ul_heldlockcnt) != 0)
3023 lockptr = self->ul_heldlocks.array;
3024 else {
3025 nlocks = 1;
3026 lockptr = &self->ul_heldlocks.single;
3027 }
3028
3029 for (; nlocks; nlocks--, lockptr++) {
3030 if (*lockptr == lock)
3031 return (lockptr);
3032 if (*lockptr == NULL && remembered == NULL)
3033 remembered = lockptr;
3034 }
3035 if (remembered != NULL) {
3036 *remembered = lock;
3037 return (remembered);
3038 }
3039
3040 /*
3041 * No entry available. Allocate more space, converting
3042 * the single entry into an array of entries if necessary.
3043 */
3044 if ((nlocks = self->ul_heldlockcnt) == 0) {
3045 /*
3046 * Initial allocation of the array.
3047 * Convert the single entry into an array.
3048 */
3049 self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
3050 lockptr = lmalloc(nlocks * sizeof (mutex_t *));
3051 /*
3052 * The single entry becomes the first entry in the array.
3053 */
3054 *lockptr = self->ul_heldlocks.single;
3055 self->ul_heldlocks.array = lockptr;
3056 /*
3057 * Return the next available entry in the array.
3058 */
3059 *++lockptr = lock;
3060 return (lockptr);
3061 }
3062 /*
3063 * Reallocate the array, double the size each time.
3064 */
3065 lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
3066 (void) memcpy(lockptr, self->ul_heldlocks.array,
3067 nlocks * sizeof (mutex_t *));
3068 lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
3069 self->ul_heldlocks.array = lockptr;
3070 self->ul_heldlockcnt *= 2;
3071 /*
3072 * Return the next available entry in the newly allocated array.
3073 */
3074 *(lockptr += nlocks) = lock;
3075 return (lockptr);
3076 }
3077
3078 /*
3079 * Insert 'lock' into our list of held locks.
3080 * Currently only used for LOCK_ROBUST mutexes.
3081 */
3082 void
3083 remember_lock(mutex_t *lock)
3084 {
3085 (void) find_lock_entry(lock);
3086 }
3087
3088 /*
3089 * Remove 'lock' from our list of held locks.
3090 * Currently only used for LOCK_ROBUST mutexes.
3091 */
3092 void
3093 forget_lock(mutex_t *lock)
3094 {
3095 *find_lock_entry(lock) = NULL;
3096 }
3097
3098 /*
3099 * Free the array of held locks.
3100 */
3101 void
3102 heldlock_free(ulwp_t *ulwp)
3103 {
3104 uint_t nlocks;
3105
3106 if ((nlocks = ulwp->ul_heldlockcnt) != 0)
3107 lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
3108 ulwp->ul_heldlockcnt = 0;
3109 ulwp->ul_heldlocks.array = NULL;
3110 }
3111
3112 /*
3113 * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
3114 * Called from _thrp_exit() to deal with abandoned locks.
3115 */
3116 void
3117 heldlock_exit(void)
3118 {
3119 ulwp_t *self = curthread;
3120 mutex_t **lockptr;
3121 uint_t nlocks;
3122 mutex_t *mp;
3123
3124 if ((nlocks = self->ul_heldlockcnt) != 0)
3125 lockptr = self->ul_heldlocks.array;
3126 else {
3127 nlocks = 1;
3128 lockptr = &self->ul_heldlocks.single;
3129 }
3130
3131 for (; nlocks; nlocks--, lockptr++) {
3132 /*
3133 * The kernel takes care of transitioning held
3134 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
3135 * We avoid that case here.
3136 */
3137 if ((mp = *lockptr) != NULL &&
3138 mutex_held(mp) &&
3139 (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
3140 LOCK_ROBUST) {
3141 mp->mutex_rcount = 0;
3142 if (!(mp->mutex_flag & LOCK_UNMAPPED))
3143 mp->mutex_flag |= LOCK_OWNERDEAD;
3144 (void) mutex_unlock_internal(mp, 1);
3145 }
3146 }
3147
3148 heldlock_free(self);
3149 }
3150
3151 #pragma weak _cond_init = cond_init
3152 int
3153 cond_init(cond_t *cvp, int type, void *arg __unused)
3154 {
3155 if (type != USYNC_THREAD && type != USYNC_PROCESS)
3156 return (EINVAL);
3157 (void) memset(cvp, 0, sizeof (*cvp));
3158 cvp->cond_type = (uint16_t)type;
3159 cvp->cond_magic = COND_MAGIC;
3160
3161 /*
3162 * This should be at the beginning of the function,
3163 * but for the sake of old broken applications that
3164 * do not have proper alignment for their condvars
3165 * (and don't check the return code from cond_init),
3166 * we put it here, after initializing the condvar regardless.
3167 */
3168 if (((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) &&
3169 curthread->ul_misaligned == 0)
3170 return (EINVAL);
3171
3172 return (0);
3173 }
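
/*
 * Illustrative sketch (not part of this file): the Solaris threads
 * interface initializes a condvar explicitly; USYNC_PROCESS would be
 * used instead for a condvar shared between processes.
 *
 *	cond_t cv;
 *
 *	(void) cond_init(&cv, USYNC_THREAD, NULL);
 */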
3174
3175 /*
3176 * cond_sleep_queue(): utility function for cond_wait_queue().
3177 *
3178 * Go to sleep on a condvar sleep queue, expect to be waked up
3179 * by someone calling cond_signal() or cond_broadcast() or due
3180 * to receiving a UNIX signal or being cancelled, or just simply
3181  * due to a spurious wakeup (like someone calling forkall()).
3182 *
3183 * The associated mutex is *not* reacquired before returning.
3184 * That must be done by the caller of cond_sleep_queue().
3185 */
3186 static int
3187 cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3188 {
3189 ulwp_t *self = curthread;
3190 queue_head_t *qp;
3191 queue_head_t *mqp;
3192 lwpid_t lwpid;
3193 int signalled;
3194 int error;
3195 int cv_wake;
3196 int release_all;
3197
3198 /*
3199 * Put ourself on the CV sleep queue, unlock the mutex, then
3200 * park ourself and unpark a candidate lwp to grab the mutex.
3201 * We must go onto the CV sleep queue before dropping the
3202 * mutex in order to guarantee atomicity of the operation.
3203 */
3204 self->ul_sp = stkptr();
3205 qp = queue_lock(cvp, CV);
3206 enqueue(qp, self, 0);
3207 cvp->cond_waiters_user = 1;
3208 self->ul_cvmutex = mp;
3209 self->ul_cv_wake = cv_wake = (tsp != NULL);
3210 self->ul_signalled = 0;
3211 if (mp->mutex_flag & LOCK_OWNERDEAD) {
3212 mp->mutex_flag &= ~LOCK_OWNERDEAD;
3213 mp->mutex_flag |= LOCK_NOTRECOVERABLE;
3214 }
3215 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
3216 lwpid = mutex_unlock_queue(mp, release_all);
3217 for (;;) {
3218 set_parking_flag(self, 1);
3219 queue_unlock(qp);
3220 if (lwpid != 0) {
3221 lwpid = preempt_unpark(self, lwpid);
3222 preempt(self);
3223 }
3224 /*
3225 * We may have a deferred signal present,
3226 * in which case we should return EINTR.
3227 * Also, we may have received a SIGCANCEL; if so
3228 * and we are cancelable we should return EINTR.
3229 * We force an immediate EINTR return from
3230 * __lwp_park() by turning our parking flag off.
3231 */
3232 if (self->ul_cursig != 0 ||
3233 (self->ul_cancelable && self->ul_cancel_pending))
3234 set_parking_flag(self, 0);
3235 /*
3236 * __lwp_park() will return the residual time in tsp
3237 * if we are unparked before the timeout expires.
3238 */
3239 error = __lwp_park(tsp, lwpid);
3240 set_parking_flag(self, 0);
3241 lwpid = 0; /* unpark the other lwp only once */
3242 /*
3243 * We were waked up by cond_signal(), cond_broadcast(),
3244 * by an interrupt or timeout (EINTR or ETIME),
3245 * or we may just have gotten a spurious wakeup.
3246 */
3247 qp = queue_lock(cvp, CV);
3248 if (!cv_wake)
3249 mqp = queue_lock(mp, MX);
3250 if (self->ul_sleepq == NULL)
3251 break;
3252 /*
3253 * We are on either the condvar sleep queue or the
3254 * mutex sleep queue. Break out of the sleep if we
3255 * were interrupted or we timed out (EINTR or ETIME).
3256 * Else this is a spurious wakeup; continue the loop.
3257 */
3258 if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */
3259 if (error) {
3260 mp->mutex_waiters = dequeue_self(mqp);
3261 break;
3262 }
3263 tsp = NULL; /* no more timeout */
3264 } else if (self->ul_sleepq == qp) { /* condvar queue */
3265 if (error) {
3266 cvp->cond_waiters_user = dequeue_self(qp);
3267 break;
3268 }
3269 /*
3270 * Else a spurious wakeup on the condvar queue.
3271 * __lwp_park() has already adjusted the timeout.
3272 */
3273 } else {
3274 thr_panic("cond_sleep_queue(): thread not on queue");
3275 }
3276 if (!cv_wake)
3277 queue_unlock(mqp);
3278 }
3279
3280 self->ul_sp = 0;
3281 self->ul_cv_wake = 0;
3282 ASSERT(self->ul_cvmutex == NULL);
3283 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
3284 self->ul_wchan == NULL);
3285
3286 signalled = self->ul_signalled;
3287 self->ul_signalled = 0;
3288 queue_unlock(qp);
3289 if (!cv_wake)
3290 queue_unlock(mqp);
3291
3292 /*
3293 * If we were concurrently cond_signal()d and any of:
3294 * received a UNIX signal, were cancelled, or got a timeout,
3295 * then perform another cond_signal() to avoid consuming it.
3296 */
3297 if (error && signalled)
3298 (void) cond_signal(cvp);
3299
3300 return (error);
3301 }
3302
3303 static void
3304 cond_wait_check_alignment(cond_t *cvp, mutex_t *mp)
3305 {
3306 if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1))
3307 lock_error(mp, "cond_wait", cvp, "mutex is misaligned");
3308 if ((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1))
3309 lock_error(mp, "cond_wait", cvp, "condvar is misaligned");
3310 }
3311
3312 int
3313 cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3314 {
3315 ulwp_t *self = curthread;
3316 int error;
3317 int merror;
3318
3319 if (self->ul_error_detection && self->ul_misaligned == 0)
3320 cond_wait_check_alignment(cvp, mp);
3321
3322 /*
3323 * The old thread library was programmed to defer signals
3324 * while in cond_wait() so that the associated mutex would
3325 * be guaranteed to be held when the application signal
3326 * handler was invoked.
3327 *
3328 * We do not behave this way by default; the state of the
3329 * associated mutex in the signal handler is undefined.
3330 *
3331 * To accommodate applications that depend on the old
3332 * behavior, the _THREAD_COND_WAIT_DEFER environment
3333 * variable can be set to 1 and we will behave in the
3334 * old way with respect to cond_wait().
3335 */
3336 if (self->ul_cond_wait_defer)
3337 sigoff(self);
3338
3339 error = cond_sleep_queue(cvp, mp, tsp);
3340
3341 /*
3342 * Reacquire the mutex.
3343 */
3344 if ((merror = mutex_lock_impl(mp, NULL)) != 0)
3345 error = merror;
3346
3347 /*
3348 * Take any deferred signal now, after we have reacquired the mutex.
3349 */
3350 if (self->ul_cond_wait_defer)
3351 sigon(self);
3352
3353 return (error);
3354 }
3355
3356 /*
3357 * cond_sleep_kernel(): utility function for cond_wait_kernel().
3358 * See the comment ahead of cond_sleep_queue(), above.
3359 */
3360 static int
3361 cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3362 {
3363 int mtype = mp->mutex_type;
3364 ulwp_t *self = curthread;
3365 int error;
3366
3367 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
3368 _ceil_prio_waive();
3369
3370 self->ul_sp = stkptr();
3371 self->ul_wchan = cvp;
3372 sigoff(self);
3373 mp->mutex_owner = 0;
3374 /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
3375 if (mtype & LOCK_PRIO_INHERIT) {
3376 mp->mutex_lockw = LOCKCLEAR;
3377 self->ul_pilocks--;
3378 }
3379 /*
3380 * ___lwp_cond_wait() returns immediately with EINTR if
3381 * set_parking_flag(self,0) is called on this lwp before it
3382 * goes to sleep in the kernel. sigacthandler() calls this
3383 * when a deferred signal is noted. This assures that we don't
3384 * get stuck in ___lwp_cond_wait() with all signals blocked
3385 * due to taking a deferred signal before going to sleep.
3386 */
3387 set_parking_flag(self, 1);
3388 if (self->ul_cursig != 0 ||
3389 (self->ul_cancelable && self->ul_cancel_pending))
3390 set_parking_flag(self, 0);
3391 error = ___lwp_cond_wait(cvp, mp, tsp, 1);
3392 set_parking_flag(self, 0);
3393 sigon(self);
3394 self->ul_sp = 0;
3395 self->ul_wchan = NULL;
3396 return (error);
3397 }
3398
3399 int
3400 cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3401 {
3402 ulwp_t *self = curthread;
3403 int error;
3404 int merror;
3405
3406 if (self->ul_error_detection && self->ul_misaligned == 0)
3407 cond_wait_check_alignment(cvp, mp);
3408
3409 /*
3410 * See the large comment in cond_wait_queue(), above.
3411 */
3412 if (self->ul_cond_wait_defer)
3413 sigoff(self);
3414
3415 error = cond_sleep_kernel(cvp, mp, tsp);
3416
3417 /*
3418 * Override the return code from ___lwp_cond_wait()
3419 * with any non-zero return code from mutex_lock().
3420 * This addresses robust lock failures in particular;
3421 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
3422 * errors in order to take corrective action.
3423 */
3424 if ((merror = mutex_lock_impl(mp, NULL)) != 0)
3425 error = merror;
3426
3427 /*
3428 * Take any deferred signal now, after we have reacquired the mutex.
3429 */
3430 if (self->ul_cond_wait_defer)
3431 sigon(self);
3432
3433 return (error);
3434 }
3435
3436 /*
3437 * Common code for cond_wait() and cond_timedwait()
3438 */
3439 int
3440 cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3441 {
3442 int mtype = mp->mutex_type;
3443 hrtime_t begin_sleep = 0;
3444 ulwp_t *self = curthread;
3445 uberdata_t *udp = self->ul_uberdata;
3446 tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
3447 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
3448 uint8_t rcount;
3449 int error = 0;
3450
3451 /*
3452 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
3453 * Except in the case of [ETIMEDOUT], all these error checks
3454 * shall act as if they were performed immediately at the
3455 * beginning of processing for the function and shall cause
3456 * an error return, in effect, prior to modifying the state
3457 * of the mutex specified by mutex or the condition variable
3458 * specified by cond.
3459 	 * Therefore, we must return EINVAL now if the timeout is invalid.
3460 */
3461 if (tsp != NULL &&
3462 (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
3463 return (EINVAL);
3464
3465 if (__td_event_report(self, TD_SLEEP, udp)) {
3466 self->ul_sp = stkptr();
3467 self->ul_wchan = cvp;
3468 self->ul_td_evbuf.eventnum = TD_SLEEP;
3469 self->ul_td_evbuf.eventdata = cvp;
3470 tdb_event(TD_SLEEP, udp);
3471 self->ul_sp = 0;
3472 }
3473 if (csp) {
3474 if (tsp)
3475 tdb_incr(csp->cond_timedwait);
3476 else
3477 tdb_incr(csp->cond_wait);
3478 }
3479 if (msp)
3480 begin_sleep = record_hold_time(msp);
3481 else if (csp)
3482 begin_sleep = gethrtime();
3483
3484 if (self->ul_error_detection) {
3485 if (!mutex_held(mp))
3486 lock_error(mp, "cond_wait", cvp, NULL);
3487 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
3488 lock_error(mp, "recursive mutex in cond_wait",
3489 cvp, NULL);
3490 if (cvp->cond_type & USYNC_PROCESS) {
3491 if (!(mtype & USYNC_PROCESS))
3492 lock_error(mp, "cond_wait", cvp,
3493 "condvar process-shared, "
3494 "mutex process-private");
3495 } else {
3496 if (mtype & USYNC_PROCESS)
3497 lock_error(mp, "cond_wait", cvp,
3498 "condvar process-private, "
3499 "mutex process-shared");
3500 }
3501 }
3502
3503 /*
3504 * We deal with recursive mutexes by completely
3505 * dropping the lock and restoring the recursion
3506 * count after waking up. This is arguably wrong,
3507 * but it obeys the principle of least astonishment.
3508 */
3509 rcount = mp->mutex_rcount;
3510 mp->mutex_rcount = 0;
3511 if ((mtype &
3512 (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
3513 (cvp->cond_type & USYNC_PROCESS))
3514 error = cond_wait_kernel(cvp, mp, tsp);
3515 else
3516 error = cond_wait_queue(cvp, mp, tsp);
3517 mp->mutex_rcount = rcount;
3518
3519 if (csp) {
3520 hrtime_t lapse = gethrtime() - begin_sleep;
3521 if (tsp == NULL)
3522 csp->cond_wait_sleep_time += lapse;
3523 else {
3524 csp->cond_timedwait_sleep_time += lapse;
3525 if (error == ETIME)
3526 tdb_incr(csp->cond_timedwait_timeout);
3527 }
3528 }
3529 return (error);
3530 }
3531
3532 /*
3533 * cond_wait() is a cancellation point but __cond_wait() is not.
3534 * Internally, libc calls the non-cancellation version.
3535 * Other libraries need to use pthread_setcancelstate(), as appropriate,
3536 * since __cond_wait() is not exported from libc.
3537 */
3538 int
3539 __cond_wait(cond_t *cvp, mutex_t *mp)
3540 {
3541 ulwp_t *self = curthread;
3542 uberdata_t *udp = self->ul_uberdata;
3543 uberflags_t *gflags;
3544
3545 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3546 !mutex_held(mp))
3547 return (EPERM);
3548
3549 /*
3550 * Optimize the common case of USYNC_THREAD plus
3551 * no error detection, no lock statistics, and no event tracing.
3552 */
3553 if ((gflags = self->ul_schedctl_called) != NULL &&
3554 (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
3555 self->ul_td_events_enable |
3556 udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
3557 return (cond_wait_queue(cvp, mp, NULL));
3558
3559 /*
3560 * Else do it the long way.
3561 */
3562 return (cond_wait_common(cvp, mp, NULL));
3563 }
3564
3565 #pragma weak _cond_wait = cond_wait
3566 int
3567 cond_wait(cond_t *cvp, mutex_t *mp)
3568 {
3569 int error;
3570
3571 _cancelon();
3572 error = __cond_wait(cvp, mp);
3573 if (error == EINTR)
3574 _canceloff();
3575 else
3576 _canceloff_nocancel();
3577 return (error);
3578 }
3579
3580 /*
3581 * pthread_cond_wait() is a cancellation point.
3582 */
3583 int
3584 pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp,
3585 pthread_mutex_t *_RESTRICT_KYWD mp)
3586 {
3587 int error;
3588
3589 error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
3590 return ((error == EINTR)? 0 : error);
3591 }
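
/*
 * Illustrative sketch (not part of this file): because wakeups may be
 * spurious (and EINTR is mapped to 0 above), callers must always re-test
 * their predicate in a loop.  'ready', 'cv', and 'm' are hypothetical.
 *
 *	(void) pthread_mutex_lock(&m);
 *	while (!ready)
 *		(void) pthread_cond_wait(&cv, &m);
 *	... consume the state protected by m ...
 *	(void) pthread_mutex_unlock(&m);
 */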
3592
3593 /*
3594 * cond_timedwait() is a cancellation point but __cond_timedwait() is not.
3595 */
3596 int
3597 __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
3598 {
3599 clockid_t clock_id = cvp->cond_clockid;
3600 timespec_t reltime;
3601 int error;
3602
3603 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3604 !mutex_held(mp))
3605 return (EPERM);
3606
3607 if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
3608 clock_id = CLOCK_REALTIME;
3609 abstime_to_reltime(clock_id, abstime, &reltime);
3610 error = cond_wait_common(cvp, mp, &reltime);
3611 if (error == ETIME && clock_id == CLOCK_HIGHRES) {
3612 /*
3613 * Don't return ETIME if we didn't really get a timeout.
3614 * This can happen if we return because someone resets
3615 * the system clock. Just return zero in this case,
3616 * giving a spurious wakeup but not a timeout.
3617 */
3618 if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
3619 abstime->tv_nsec > gethrtime())
3620 error = 0;
3621 }
3622 return (error);
3623 }
3624
3625 int
3626 cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
3627 {
3628 int error;
3629
3630 _cancelon();
3631 error = __cond_timedwait(cvp, mp, abstime);
3632 if (error == EINTR)
3633 _canceloff();
3634 else
3635 _canceloff_nocancel();
3636 return (error);
3637 }
3638
3639 /*
3640 * pthread_cond_timedwait() is a cancellation point.
3641 */
3642 int
3643 pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp,
3644 pthread_mutex_t *_RESTRICT_KYWD mp,
3645 const struct timespec *_RESTRICT_KYWD abstime)
3646 {
3647 int error;
3648
3649 error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
3650 if (error == ETIME)
3651 error = ETIMEDOUT;
3652 else if (error == EINTR)
3653 error = 0;
3654 return (error);
3655 }
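
/*
 * Illustrative sketch (not part of this file): a timed wait keeps the
 * same absolute deadline across spurious wakeups, so the loop cannot
 * wait longer than intended.  Names are hypothetical.
 *
 *	struct timespec deadline;
 *	int err = 0;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 10;
 *	(void) pthread_mutex_lock(&m);
 *	while (!done && err != ETIMEDOUT)
 *		err = pthread_cond_timedwait(&cv, &m, &deadline);
 *	(void) pthread_mutex_unlock(&m);
 */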
3656
3657 /*
3658 * cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not.
3659 */
3660 int
3661 __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
3662 {
3663 timespec_t tslocal = *reltime;
3664
3665 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3666 !mutex_held(mp))
3667 return (EPERM);
3668
3669 return (cond_wait_common(cvp, mp, &tslocal));
3670 }
3671
3672 int
3673 cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
3674 {
3675 int error;
3676
3677 _cancelon();
3678 error = __cond_reltimedwait(cvp, mp, reltime);
3679 if (error == EINTR)
3680 _canceloff();
3681 else
3682 _canceloff_nocancel();
3683 return (error);
3684 }
3685
3686 int
3687 pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp,
3688 pthread_mutex_t *_RESTRICT_KYWD mp,
3689 const struct timespec *_RESTRICT_KYWD reltime)
3690 {
3691 int error;
3692
3693 error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
3694 if (error == ETIME)
3695 error = ETIMEDOUT;
3696 else if (error == EINTR)
3697 error = 0;
3698 return (error);
3699 }
3700
3701 #pragma weak pthread_cond_signal = cond_signal
3702 #pragma weak _cond_signal = cond_signal
3703 int
3704 cond_signal(cond_t *cvp)
3705 {
3706 ulwp_t *self = curthread;
3707 uberdata_t *udp = self->ul_uberdata;
3708 tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
3709 int error = 0;
3710 int more;
3711 lwpid_t lwpid;
3712 queue_head_t *qp;
3713 mutex_t *mp;
3714 queue_head_t *mqp;
3715 ulwp_t **ulwpp;
3716 ulwp_t *ulwp;
3717 ulwp_t *prev;
3718
3719 if (csp)
3720 tdb_incr(csp->cond_signal);
3721
3722 if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */
3723 error = _lwp_cond_signal(cvp);
3724
3725 if (!cvp->cond_waiters_user) /* no one sleeping at user-level */
3726 return (error);
3727
3728 /*
3729 * Move some thread from the condvar sleep queue to the mutex sleep
3730 * queue for the mutex that it will acquire on being waked up.
3731 * We can do this only if we own the mutex it will acquire.
3732 * If we do not own the mutex, or if its ul_cv_wake flag
3733 * is set, just dequeue and unpark it.
3734 */
3735 qp = queue_lock(cvp, CV);
3736 ulwpp = queue_slot(qp, &prev, &more);
3737 cvp->cond_waiters_user = more;
3738 if (ulwpp == NULL) { /* no one on the sleep queue */
3739 queue_unlock(qp);
3740 return (error);
3741 }
3742 ulwp = *ulwpp;
3743
3744 /*
3745 * Inform the thread that it was the recipient of a cond_signal().
3746 * This lets it deal with cond_signal() and, concurrently,
3747 * one or more of a cancellation, a UNIX signal, or a timeout.
3748 * These latter conditions must not consume a cond_signal().
3749 */
3750 ulwp->ul_signalled = 1;
3751
3752 /*
3753 * Dequeue the waiter but leave its ul_sleepq non-NULL
3754 * while we move it to the mutex queue so that it can
3755 * deal properly with spurious wakeups.
3756 */
3757 queue_unlink(qp, ulwpp, prev);
3758
3759 mp = ulwp->ul_cvmutex; /* the mutex it will acquire */
3760 ulwp->ul_cvmutex = NULL;
3761 ASSERT(mp != NULL);
3762
3763 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3764 /* just wake it up */
3765 lwpid = ulwp->ul_lwpid;
3766 no_preempt(self);
3767 ulwp->ul_sleepq = NULL;
3768 ulwp->ul_wchan = NULL;
3769 queue_unlock(qp);
3770 (void) __lwp_unpark(lwpid);
3771 preempt(self);
3772 } else {
3773 /* move it to the mutex queue */
3774 mqp = queue_lock(mp, MX);
3775 enqueue(mqp, ulwp, 0);
3776 mp->mutex_waiters = 1;
3777 queue_unlock(mqp);
3778 queue_unlock(qp);
3779 }
3780
3781 return (error);
3782 }
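
/*
 * Illustrative sketch (not part of libc): the hand-off to the mutex
 * sleep queue above applies only when the signalling thread holds the
 * mutex that the waiter will reacquire, which is the common
 * application pattern.  'm', 'cv' and 'ready' are assumed application
 * state.
 *
 *	(void) pthread_mutex_lock(&m);
 *	ready = 1;
 *	(void) pthread_cond_signal(&cv);
 *	(void) pthread_mutex_unlock(&m);
 */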
3783
3784 /*
3785 * Utility function called by mutex_wakeup_all(), cond_broadcast(),
3786 * and rw_queue_release() to (re)allocate a big buffer to hold the
3787 * lwpids of all the threads to be set running after they are removed
3788 * from their sleep queues. Since we are holding a queue lock, we
3789 * cannot call any function that might acquire a lock. mmap(), munmap(),
3790 * and lwp_unpark_all() are simple system calls and are safe in this regard.
3791 */
3792 lwpid_t *
3793 alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
3794 {
3795 /*
3796 * Allocate NEWLWPS ids on the first overflow.
3797 * Double the allocation each time after that.
3798 */
3799 int nlwpid = *nlwpid_ptr;
3800 int maxlwps = *maxlwps_ptr;
3801 int first_allocation;
3802 int newlwps;
3803 void *vaddr;
3804
3805 ASSERT(nlwpid == maxlwps);
3806
3807 first_allocation = (maxlwps == MAXLWPS);
3808 newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
3809 vaddr = mmap(NULL, newlwps * sizeof (lwpid_t),
3810 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
3811
3812 if (vaddr == MAP_FAILED) {
3813 /*
3814 * Let's hope this never happens.
3815 * If it does, then we have a terrible
3816 * thundering herd on our hands.
3817 */
3818 (void) __lwp_unpark_all(lwpid, nlwpid);
3819 *nlwpid_ptr = 0;
3820 } else {
3821 (void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
3822 if (!first_allocation)
3823 (void) munmap((caddr_t)lwpid,
3824 maxlwps * sizeof (lwpid_t));
3825 lwpid = vaddr;
3826 *maxlwps_ptr = newlwps;
3827 }
3828
3829 return (lwpid);
3830 }
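
/*
 * For reference, the overflow pattern used by the callers of
 * alloc_lwpids() (see cond_broadcast() below) is:
 *
 *	if (nlwpid == maxlwps)
 *		lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
 *	lwpid[nlwpid++] = ulwp->ul_lwpid;
 *
 * and, after the queue lock has been dropped and the lwps unparked:
 *
 *	if (lwpid != buffer)
 *		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
 */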
3831
3832 #pragma weak pthread_cond_broadcast = cond_broadcast
3833 #pragma weak _cond_broadcast = cond_broadcast
3834 int
3835 cond_broadcast(cond_t *cvp)
3836 {
3837 ulwp_t *self = curthread;
3838 uberdata_t *udp = self->ul_uberdata;
3839 tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
3840 int error = 0;
3841 queue_head_t *qp;
3842 queue_root_t *qrp;
3843 mutex_t *mp;
3844 mutex_t *mp_cache = NULL;
3845 queue_head_t *mqp = NULL;
3846 ulwp_t *ulwp;
3847 int nlwpid = 0;
3848 int maxlwps = MAXLWPS;
3849 lwpid_t buffer[MAXLWPS];
3850 lwpid_t *lwpid = buffer;
3851
3852 if (csp)
3853 tdb_incr(csp->cond_broadcast);
3854
3855 if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */
3856 error = _lwp_cond_broadcast(cvp);
3857
3858 if (!cvp->cond_waiters_user) /* no one sleeping at user-level */
3859 return (error);
3860
3861 /*
3862 * Move everyone from the condvar sleep queue to the mutex sleep
3863 * queue for the mutex that they will acquire on being woken up.
3864 * We can do this only if we own the mutex they will acquire.
3865 * If we do not own the mutex, or if their ul_cv_wake flag
3866 * is set, just dequeue and unpark them.
3867 *
3868 * We keep track of lwpids that are to be unparked in lwpid[].
3869 * __lwp_unpark_all() is called to unpark all of them after
3870 * they have been removed from the sleep queue and the sleep
3871 * queue lock has been dropped. If we run out of space in our
3872 * on-stack buffer, we need to allocate more but we can't call
3873 * lmalloc() because we are holding a queue lock when the overflow
3874 * occurs and lmalloc() acquires a lock. We can't use alloca()
3875 * either because the application may have allocated a small
3876 * stack and we don't want to overrun the stack. So we call
3877 * alloc_lwpids() to allocate a bigger buffer using the mmap()
3878 * system call directly since that path acquires no locks.
3879 */
3880 qp = queue_lock(cvp, CV);
3881 cvp->cond_waiters_user = 0;
3882 for (;;) {
3883 if ((qrp = qp->qh_root) == NULL ||
3884 (ulwp = qrp->qr_head) == NULL)
3885 break;
3886 ASSERT(ulwp->ul_wchan == cvp);
3887 queue_unlink(qp, &qrp->qr_head, NULL);
3888 mp = ulwp->ul_cvmutex; /* its mutex */
3889 ulwp->ul_cvmutex = NULL;
3890 ASSERT(mp != NULL);
3891 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3892 /* just wake it up */
3893 ulwp->ul_sleepq = NULL;
3894 ulwp->ul_wchan = NULL;
3895 if (nlwpid == maxlwps)
3896 lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
3897 lwpid[nlwpid++] = ulwp->ul_lwpid;
3898 } else {
3899 /* move it to the mutex queue */
3900 if (mp != mp_cache) {
3901 mp_cache = mp;
3902 if (mqp != NULL)
3903 queue_unlock(mqp);
3904 mqp = queue_lock(mp, MX);
3905 }
3906 enqueue(mqp, ulwp, 0);
3907 mp->mutex_waiters = 1;
3908 }
3909 }
3910 if (mqp != NULL)
3911 queue_unlock(mqp);
3912 if (nlwpid == 0) {
3913 queue_unlock(qp);
3914 } else {
3915 no_preempt(self);
3916 queue_unlock(qp);
3917 if (nlwpid == 1)
3918 (void) __lwp_unpark(lwpid[0]);
3919 else
3920 (void) __lwp_unpark_all(lwpid, nlwpid);
3921 preempt(self);
3922 }
3923 if (lwpid != buffer)
3924 (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
3925 return (error);
3926 }
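
/*
 * Illustrative sketch (not part of libc): a broadcaster typically
 * changes shared state under the mutex and then wakes every waiter;
 * each woken waiter still reacquires the mutex one at a time and must
 * recheck its predicate.  'm', 'cv' and 'shutdown' are assumed
 * application state.
 *
 *	(void) pthread_mutex_lock(&m);
 *	shutdown = 1;
 *	(void) pthread_cond_broadcast(&cv);
 *	(void) pthread_mutex_unlock(&m);
 */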
3927
3928 #pragma weak pthread_cond_destroy = cond_destroy
3929 int
3930 cond_destroy(cond_t *cvp)
3931 {
3932 cvp->cond_magic = 0;
3933 tdb_sync_obj_deregister(cvp);
3934 return (0);
3935 }
3936
3937 #if defined(DEBUG)
3938 void
3939 assert_no_libc_locks_held(void)
3940 {
3941 ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
3942 }
3943
3944 /* protected by link_lock */
3945 uint64_t spin_lock_spin;
3946 uint64_t spin_lock_spin2;
3947 uint64_t spin_lock_sleep;
3948 uint64_t spin_lock_wakeup;
3949
3950 /*
3951 * Record spin lock statistics.
3952 * Called by a thread exiting itself in thrp_exit().
3953 * Also called via atexit() from the thread calling
3954 * exit(), to account for all the other threads as well.
3955 */
3956 void
3957 record_spin_locks(ulwp_t *ulwp)
3958 {
3959 spin_lock_spin += ulwp->ul_spin_lock_spin;
3960 spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
3961 spin_lock_sleep += ulwp->ul_spin_lock_sleep;
3962 spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
3963 ulwp->ul_spin_lock_spin = 0;
3964 ulwp->ul_spin_lock_spin2 = 0;
3965 ulwp->ul_spin_lock_sleep = 0;
3966 ulwp->ul_spin_lock_wakeup = 0;
3967 }
3968
3969 /*
3970 * atexit function: dump the queue statistics to stderr.
3971 */
3972 #include <stdio.h>
3973 void
3974 dump_queue_statistics(void)
3975 {
3976 uberdata_t *udp = curthread->ul_uberdata;
3977 queue_head_t *qp;
3978 int qn;
3979 uint64_t spin_lock_total = 0;
3980
3981 if (udp->queue_head == NULL || thread_queue_dump == 0)
3982 return;
3983
3984 if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
3985 fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0)
3986 return;
3987 for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
3988 if (qp->qh_lockcount == 0)
3989 continue;
3990 spin_lock_total += qp->qh_lockcount;
3991 if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
3992 (u_longlong_t)qp->qh_lockcount,
3993 qp->qh_qmax, qp->qh_hmax) < 0)
3994 return;
3995 }
3996
3997 if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
3998 fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0)
3999 return;
4000 for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
4001 if (qp->qh_lockcount == 0)
4002 continue;
4003 spin_lock_total += qp->qh_lockcount;
4004 if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
4005 (u_longlong_t)qp->qh_lockcount,
4006 qp->qh_qmax, qp->qh_hmax) < 0)
4007 return;
4008 }
4009
4010 (void) fprintf(stderr, "\n spin_lock_total = %10llu\n",
4011 (u_longlong_t)spin_lock_total);
4012 (void) fprintf(stderr, " spin_lock_spin = %10llu\n",
4013 (u_longlong_t)spin_lock_spin);
4014 (void) fprintf(stderr, " spin_lock_spin2 = %10llu\n",
4015 (u_longlong_t)spin_lock_spin2);
4016 (void) fprintf(stderr, " spin_lock_sleep = %10llu\n",
4017 (u_longlong_t)spin_lock_sleep);
4018 (void) fprintf(stderr, " spin_lock_wakeup = %10llu\n",
4019 (u_longlong_t)spin_lock_wakeup);
4020 }
4021 #endif
4022