xref: /illumos-gate/usr/src/lib/libc/port/threads/thr.c (revision 0570e35b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 /*
26  * Copyright 2016 Joyent, Inc.
27  */
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <pthread.h>
32 #include <procfs.h>
33 #include <sys/uio.h>
34 #include <ctype.h>
35 #include "libc.h"
36 
37 /*
38  * These symbols should not be exported from libc, but
39  * /lib/libm.so.2 references _thr_main.  libm needs to be fixed.
40  * Also, some older versions of the Studio compiler/debugger
41  * components reference them.  These need to be fixed, too.
42  */
43 #pragma weak _thr_main = thr_main
44 #pragma weak _thr_create = thr_create
45 #pragma weak _thr_join = thr_join
46 #pragma weak _thr_self = thr_self
47 
48 #undef errno
49 extern int errno;
50 
51 /*
52  * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
53  * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
54  * system used it illegally (it is a consolidation private symbol).
55  * To accommodate this and possibly other abusers of the symbol,
56  * we make it always equal to 1 now that libthread has been folded
57  * into libc.  The new __libc_threaded symbol is used to indicate
58  * the new meaning, "more than one thread exists".
59  */
60 int __threaded = 1;		/* always equal to 1 */
61 int __libc_threaded = 0;	/* zero until first thr_create() */
62 
63 /*
64  * thr_concurrency and pthread_concurrency are not used by the library.
65  * They exist solely to hold and return the values set by calls to
66  * thr_setconcurrency() and pthread_setconcurrency().
67  * Because thr_concurrency is affected by the THR_NEW_LWP flag
68  * to thr_create(), thr_concurrency is protected by link_lock.
69  */
70 static	int	thr_concurrency = 1;
71 static	int	pthread_concurrency;
72 
73 #define	HASHTBLSZ	1024	/* must be a power of two */
74 #define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)
75 
76 /* initial allocation, just enough for one lwp */
77 #pragma align 64(init_hash_table)
78 thr_hash_table_t init_hash_table[1] = {
79 	{ DEFAULTMUTEX, DEFAULTCV, NULL },
80 };
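
/*
 * Worked illustration of TIDHASH() above (a note, not new code):
 * once finish_init() grows the table to HASHTBLSZ buckets,
 * hash_mask == hash_size - 1 == 1023, so the reduction is a single
 * AND:  tid 1025 maps to bucket (1025 & 1023) == 1.  This is why
 * HASHTBLSZ must be a power of two.
 */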
81 
82 extern const Lc_interface rtld_funcs[];
83 
84 /*
85  * The weak version is known to libc_db and mdb.
86  */
87 #pragma weak _uberdata = __uberdata
88 uberdata_t __uberdata = {
89 	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
90 	{ RECURSIVEMUTEX, NULL, 0 },	/* ld_lock */
91 	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
92 	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
93 	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
94 	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
95 	{ 0, },				/* tdb_hash_lock_stats */
96 	{ { 0 }, },			/* siguaction[NSIG] */
97 	{{ DEFAULTMUTEX, NULL, 0 },		/* bucket[NBUCKETS] */
98 	{ DEFAULTMUTEX, NULL, 0 },
99 	{ DEFAULTMUTEX, NULL, 0 },
100 	{ DEFAULTMUTEX, NULL, 0 },
101 	{ DEFAULTMUTEX, NULL, 0 },
102 	{ DEFAULTMUTEX, NULL, 0 },
103 	{ DEFAULTMUTEX, NULL, 0 },
104 	{ DEFAULTMUTEX, NULL, 0 },
105 	{ DEFAULTMUTEX, NULL, 0 },
106 	{ DEFAULTMUTEX, NULL, 0 }},
107 	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
108 	{ RECURSIVEMUTEX, NULL },		/* quickexit_root */
109 	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
110 	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
111 	0,			/* primary_map */
112 	0,			/* bucket_init */
113 	0,			/* pad[0] */
114 	0,			/* pad[1] */
115 	{ 0 },			/* uberflags */
116 	NULL,			/* queue_head */
117 	init_hash_table,	/* thr_hash_table */
118 	1,			/* hash_size: size of the hash table */
119 	0,			/* hash_mask: hash_size - 1 */
120 	NULL,			/* ulwp_one */
121 	NULL,			/* all_lwps */
122 	NULL,			/* all_zombies */
123 	0,			/* nthreads */
124 	0,			/* nzombies */
125 	0,			/* ndaemons */
126 	0,			/* pid */
127 	sigacthandler,		/* sigacthandler */
128 	NULL,			/* lwp_stacks */
129 	NULL,			/* lwp_laststack */
130 	0,			/* nfreestack */
131 	10,			/* thread_stack_cache */
132 	NULL,			/* ulwp_freelist */
133 	NULL,			/* ulwp_lastfree */
134 	NULL,			/* ulwp_replace_free */
135 	NULL,			/* ulwp_replace_last */
136 	NULL,			/* atforklist */
137 	NULL,			/* robustlocks */
138 	NULL,			/* robustlist */
139 	NULL,			/* progname */
140 	NULL,			/* __tdb_bootstrap */
141 	{			/* tdb */
142 		NULL,		/* tdb_sync_addr_hash */
143 		0,		/* tdb_register_count */
144 		0,		/* tdb_hash_alloc_failed */
145 		NULL,		/* tdb_sync_addr_free */
146 		NULL,		/* tdb_sync_addr_last */
147 		0,		/* tdb_sync_alloc */
148 		{ 0, 0 },	/* tdb_ev_global_mask */
149 		tdb_events,	/* tdb_events array */
150 	},
151 };
152 
153 /*
154  * The weak version is known to libc_db and mdb.
155  */
156 #pragma weak _tdb_bootstrap = __tdb_bootstrap
157 uberdata_t **__tdb_bootstrap = NULL;
158 
159 int	thread_queue_fifo = 4;
160 int	thread_queue_dump = 0;
161 int	thread_cond_wait_defer = 0;
162 int	thread_error_detection = 0;
163 int	thread_async_safe = 0;
164 int	thread_stack_cache = 10;
165 int	thread_door_noreserve = 0;
166 int	thread_locks_misaligned = 0;
167 
168 static	ulwp_t	*ulwp_alloc(void);
169 static	void	ulwp_free(ulwp_t *);
170 
171 /*
172  * Insert the lwp into the hash table.
173  */
174 void
175 hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
176 {
177 	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
178 	udp->thr_hash_table[ix].hash_bucket = ulwp;
179 	ulwp->ul_ix = ix;
180 }
181 
182 void
183 hash_in(ulwp_t *ulwp, uberdata_t *udp)
184 {
185 	int ix = TIDHASH(ulwp->ul_lwpid, udp);
186 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
187 
188 	lmutex_lock(mp);
189 	hash_in_unlocked(ulwp, ix, udp);
190 	lmutex_unlock(mp);
191 }
192 
193 /*
194  * Delete the lwp from the hash table.
195  */
196 void
197 hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
198 {
199 	ulwp_t **ulwpp;
200 
201 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
202 	    ulwp != *ulwpp;
203 	    ulwpp = &(*ulwpp)->ul_hash)
204 		;
205 	*ulwpp = ulwp->ul_hash;
206 	ulwp->ul_hash = NULL;
207 	ulwp->ul_ix = -1;
208 }
209 
210 void
211 hash_out(ulwp_t *ulwp, uberdata_t *udp)
212 {
213 	int ix;
214 
215 	if ((ix = ulwp->ul_ix) >= 0) {
216 		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
217 
218 		lmutex_lock(mp);
219 		hash_out_unlocked(ulwp, ix, udp);
220 		lmutex_unlock(mp);
221 	}
222 }
223 
224 /*
225  * Retain stack information for thread structures that are being recycled for
226  * new threads.  All other members of the thread structure should be zeroed.
227  */
228 static void
229 ulwp_clean(ulwp_t *ulwp)
230 {
231 	caddr_t stk = ulwp->ul_stk;
232 	size_t mapsiz = ulwp->ul_mapsiz;
233 	size_t guardsize = ulwp->ul_guardsize;
234 	uintptr_t stktop = ulwp->ul_stktop;
235 	size_t stksiz = ulwp->ul_stksiz;
236 
237 	(void) memset(ulwp, 0, sizeof (*ulwp));
238 
239 	ulwp->ul_stk = stk;
240 	ulwp->ul_mapsiz = mapsiz;
241 	ulwp->ul_guardsize = guardsize;
242 	ulwp->ul_stktop = stktop;
243 	ulwp->ul_stksiz = stksiz;
244 }
245 
246 static int stackprot;
247 
248 /*
249  * Answer the question, "Is the lwp in question really dead?"
250  * We must inquire of the operating system to be really sure
251  * because the lwp may have called lwp_exit() but it has not
252  * yet completed the exit.
253  */
254 static int
255 dead_and_buried(ulwp_t *ulwp)
256 {
257 	if (ulwp->ul_lwpid == (lwpid_t)(-1))
258 		return (1);
259 	if (ulwp->ul_dead && ulwp->ul_detached &&
260 	    _lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
261 		ulwp->ul_lwpid = (lwpid_t)(-1);
262 		return (1);
263 	}
264 	return (0);
265 }
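
/*
 * A user-level analogue of the probe above; a minimal sketch using
 * the public thr_kill(3C) interface rather than the private
 * _lwp_kill().  Signal 0 performs error checking only, and thr_kill()
 * returns an error number directly, so ESRCH means the target thread
 * no longer exists.  (Illustration only; assumes <thread.h> and
 * <errno.h>.)
 */
#if 0
static int
thread_exists(thread_t tid)
{
	return (thr_kill(tid, 0) != ESRCH);
}
#endif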
266 
267 /*
268  * Attempt to keep the stack cache within the specified cache limit.
269  */
270 static void
271 trim_stack_cache(int cache_limit)
272 {
273 	ulwp_t *self = curthread;
274 	uberdata_t *udp = self->ul_uberdata;
275 	ulwp_t *prev = NULL;
276 	ulwp_t **ulwpp = &udp->lwp_stacks;
277 	ulwp_t *ulwp;
278 
279 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));
280 
281 	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
282 		if (dead_and_buried(ulwp)) {
283 			*ulwpp = ulwp->ul_next;
284 			if (ulwp == udp->lwp_laststack)
285 				udp->lwp_laststack = prev;
286 			hash_out(ulwp, udp);
287 			udp->nfreestack--;
288 			(void) munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
289 			/*
290 			 * Now put the free ulwp on the ulwp freelist.
291 			 */
292 			ulwp->ul_mapsiz = 0;
293 			ulwp->ul_next = NULL;
294 			if (udp->ulwp_freelist == NULL)
295 				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
296 			else {
297 				udp->ulwp_lastfree->ul_next = ulwp;
298 				udp->ulwp_lastfree = ulwp;
299 			}
300 		} else {
301 			prev = ulwp;
302 			ulwpp = &ulwp->ul_next;
303 		}
304 	}
305 }
306 
307 /*
308  * Find an unused stack of the requested size
309  * or create a new stack of the requested size.
310  * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
311  * thr_exit() stores 1 in the ul_dead member.
312  * thr_join() stores -1 in the ul_lwpid member.
313  */
314 static ulwp_t *
315 find_stack(size_t stksize, size_t guardsize)
316 {
317 	static size_t pagesize = 0;
318 
319 	uberdata_t *udp = curthread->ul_uberdata;
320 	size_t mapsize;
321 	ulwp_t *prev;
322 	ulwp_t *ulwp;
323 	ulwp_t **ulwpp;
324 	void *stk;
325 
326 	/*
327 	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
328 	 * unless overridden by the system's configuration.
329 	 */
330 	if (stackprot == 0) {	/* do this once */
331 		long lprot = _sysconf(_SC_STACK_PROT);
332 		if (lprot <= 0)
333 			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
334 		stackprot = (int)lprot;
335 	}
336 	if (pagesize == 0)	/* do this once */
337 		pagesize = _sysconf(_SC_PAGESIZE);
338 
339 	/*
340 	 * One megabyte stacks by default, but subtract off
341 	 * two pages for the system-created red zones.
342 	 * Round up a non-zero stack size to a pagesize multiple.
343 	 */
344 	if (stksize == 0)
345 		stksize = DEFAULTSTACK - 2 * pagesize;
346 	else
347 		stksize = ((stksize + pagesize - 1) & -pagesize);
348 
349 	/*
350 	 * Round up the mapping size to a multiple of pagesize.
351 	 * Note: mmap() provides at least one page of red zone
352 	 * so we deduct that from the value of guardsize.
353 	 */
354 	if (guardsize != 0)
355 		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
356 	mapsize = stksize + guardsize;
357 
358 	lmutex_lock(&udp->link_lock);
359 	for (prev = NULL, ulwpp = &udp->lwp_stacks;
360 	    (ulwp = *ulwpp) != NULL;
361 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
362 		if (ulwp->ul_mapsiz == mapsize &&
363 		    ulwp->ul_guardsize == guardsize &&
364 		    dead_and_buried(ulwp)) {
365 			/*
366 			 * The previous lwp is gone; reuse the stack.
367 			 * Remove the ulwp from the stack list.
368 			 */
369 			*ulwpp = ulwp->ul_next;
370 			ulwp->ul_next = NULL;
371 			if (ulwp == udp->lwp_laststack)
372 				udp->lwp_laststack = prev;
373 			hash_out(ulwp, udp);
374 			udp->nfreestack--;
375 			lmutex_unlock(&udp->link_lock);
376 			ulwp_clean(ulwp);
377 			return (ulwp);
378 		}
379 	}
380 
381 	/*
382 	 * None of the cached stacks matched our mapping size.
383 	 * Reduce the stack cache to get rid of possibly
384 	 * very old stacks that will never be reused.
385 	 */
386 	if (udp->nfreestack > udp->thread_stack_cache)
387 		trim_stack_cache(udp->thread_stack_cache);
388 	else if (udp->nfreestack > 0)
389 		trim_stack_cache(udp->nfreestack - 1);
390 	lmutex_unlock(&udp->link_lock);
391 
392 	/*
393 	 * Create a new stack.
394 	 */
395 	if ((stk = mmap(NULL, mapsize, stackprot,
396 	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
397 		/*
398 		 * We have allocated our stack.  Now allocate the ulwp.
399 		 */
400 		ulwp = ulwp_alloc();
401 		if (ulwp == NULL)
402 			(void) munmap(stk, mapsize);
403 		else {
404 			ulwp->ul_stk = stk;
405 			ulwp->ul_mapsiz = mapsize;
406 			ulwp->ul_guardsize = guardsize;
407 			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
408 			ulwp->ul_stksiz = stksize;
409 			if (guardsize)	/* protect the extra red zone */
410 				(void) mprotect(stk, guardsize, PROT_NONE);
411 		}
412 	}
413 	return (ulwp);
414 }
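
/*
 * The rounding idiom used twice above relies on pagesize being a
 * power of two: -pagesize is the two's-complement mask with the low
 * bits clear, so (x + pagesize - 1) & -pagesize rounds x up to the
 * next multiple of pagesize.  Worked example, assuming 4K pages:
 * (5000 + 4095) & -4096 == 9095 & ~0xfff == 8192.
 */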
415 
416 /*
417  * Get a ulwp_t structure from the free list or allocate a new one.
418  * Such ulwp_t's do not have a stack allocated by the library.
419  */
420 static ulwp_t *
421 ulwp_alloc(void)
422 {
423 	ulwp_t *self = curthread;
424 	uberdata_t *udp = self->ul_uberdata;
425 	size_t tls_size;
426 	ulwp_t *prev;
427 	ulwp_t *ulwp;
428 	ulwp_t **ulwpp;
429 	caddr_t data;
430 
431 	lmutex_lock(&udp->link_lock);
432 	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
433 	    (ulwp = *ulwpp) != NULL;
434 	    prev = ulwp, ulwpp = &ulwp->ul_next) {
435 		if (dead_and_buried(ulwp)) {
436 			*ulwpp = ulwp->ul_next;
437 			ulwp->ul_next = NULL;
438 			if (ulwp == udp->ulwp_lastfree)
439 				udp->ulwp_lastfree = prev;
440 			hash_out(ulwp, udp);
441 			lmutex_unlock(&udp->link_lock);
442 			ulwp_clean(ulwp);
443 			return (ulwp);
444 		}
445 	}
446 	lmutex_unlock(&udp->link_lock);
447 
448 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
449 	data = lmalloc(sizeof (*ulwp) + tls_size);
450 	if (data != NULL) {
451 		/* LINTED pointer cast may result in improper alignment */
452 		ulwp = (ulwp_t *)(data + tls_size);
453 	}
454 	return (ulwp);
455 }
456 
457 /*
458  * Free a ulwp structure.
459  * If there is an associated stack, put it on the stack list and
460  * munmap() previously freed stacks up to the residual cache limit.
461  * Else put it on the ulwp free list and never call lfree() on it.
462  */
463 static void
464 ulwp_free(ulwp_t *ulwp)
465 {
466 	uberdata_t *udp = curthread->ul_uberdata;
467 
468 	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
469 	ulwp->ul_next = NULL;
470 	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
471 		/*EMPTY*/;
472 	else if (ulwp->ul_mapsiz != 0) {
473 		if (udp->lwp_stacks == NULL)
474 			udp->lwp_stacks = udp->lwp_laststack = ulwp;
475 		else {
476 			udp->lwp_laststack->ul_next = ulwp;
477 			udp->lwp_laststack = ulwp;
478 		}
479 		if (++udp->nfreestack > udp->thread_stack_cache)
480 			trim_stack_cache(udp->thread_stack_cache);
481 	} else {
482 		if (udp->ulwp_freelist == NULL)
483 			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
484 		else {
485 			udp->ulwp_lastfree->ul_next = ulwp;
486 			udp->ulwp_lastfree = ulwp;
487 		}
488 	}
489 }
490 
491 /*
492  * Find a named lwp and return a pointer to its hash list location.
493  * On success, returns with the hash lock held.
494  */
495 ulwp_t **
496 find_lwpp(thread_t tid)
497 {
498 	uberdata_t *udp = curthread->ul_uberdata;
499 	int ix = TIDHASH(tid, udp);
500 	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
501 	ulwp_t *ulwp;
502 	ulwp_t **ulwpp;
503 
504 	if (tid == 0)
505 		return (NULL);
506 
507 	lmutex_lock(mp);
508 	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
509 	    (ulwp = *ulwpp) != NULL;
510 	    ulwpp = &ulwp->ul_hash) {
511 		if (ulwp->ul_lwpid == tid)
512 			return (ulwpp);
513 	}
514 	lmutex_unlock(mp);
515 	return (NULL);
516 }
517 
518 /*
519  * Wake up all lwps waiting on this lwp for some reason.
520  */
521 void
522 ulwp_broadcast(ulwp_t *ulwp)
523 {
524 	ulwp_t *self = curthread;
525 	uberdata_t *udp = self->ul_uberdata;
526 
527 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
528 	(void) cond_broadcast(ulwp_condvar(ulwp, udp));
529 }
530 
531 /*
532  * Find a named lwp and return a pointer to it.
533  * Returns with the hash lock held.
534  */
535 ulwp_t *
536 find_lwp(thread_t tid)
537 {
538 	ulwp_t *self = curthread;
539 	uberdata_t *udp = self->ul_uberdata;
540 	ulwp_t *ulwp = NULL;
541 	ulwp_t **ulwpp;
542 
543 	if (self->ul_lwpid == tid) {
544 		ulwp = self;
545 		ulwp_lock(ulwp, udp);
546 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
547 		ulwp = *ulwpp;
548 	}
549 
550 	if (ulwp && ulwp->ul_dead) {
551 		ulwp_unlock(ulwp, udp);
552 		ulwp = NULL;
553 	}
554 
555 	return (ulwp);
556 }
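
/*
 * Sketch of the calling convention established by find_lwp(): on
 * success the ulwp's hash lock is held, so every successful lookup
 * must be paired with ulwp_unlock(), as pthread_detach() does below.
 */
#if 0
	ulwp_t *ulwp;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);
	/* ... examine *ulwp while the hash lock is held ... */
	ulwp_unlock(ulwp, curthread->ul_uberdata);
#endif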
557 
558 int
559 _thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
560     long flags, thread_t *new_thread, size_t guardsize)
561 {
562 	ulwp_t *self = curthread;
563 	uberdata_t *udp = self->ul_uberdata;
564 	ucontext_t uc;
565 	uint_t lwp_flags;
566 	thread_t tid;
567 	int error;
568 	ulwp_t *ulwp;
569 
570 	/*
571 	 * Enforce the restriction of not creating any threads
572 	 * until the primary link map has been initialized.
573 	 * Also, disallow thread creation to a child of vfork().
574 	 */
575 	if (!self->ul_primarymap || self->ul_vfork)
576 		return (ENOTSUP);
577 
578 	if (udp->hash_size == 1)
579 		finish_init();
580 
581 	if ((stk || stksize) && stksize < MINSTACK)
582 		return (EINVAL);
583 
584 	if (stk == NULL) {
585 		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
586 			return (ENOMEM);
587 		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
588 	} else {
589 		/* initialize the private stack */
590 		if ((ulwp = ulwp_alloc()) == NULL)
591 			return (ENOMEM);
592 		ulwp->ul_stk = stk;
593 		ulwp->ul_stktop = (uintptr_t)stk + stksize;
594 		ulwp->ul_stksiz = stksize;
595 	}
596 	/* ulwp is not in the hash table; make sure hash_out() doesn't fail */
597 	ulwp->ul_ix = -1;
598 	ulwp->ul_errnop = &ulwp->ul_errno;
599 
600 	lwp_flags = LWP_SUSPENDED;
601 	if (flags & (THR_DETACHED|THR_DAEMON)) {
602 		flags |= THR_DETACHED;
603 		lwp_flags |= LWP_DETACHED;
604 	}
605 	if (flags & THR_DAEMON)
606 		lwp_flags |= LWP_DAEMON;
607 
608 	/* creating a thread: enforce mt-correctness in mutex_lock() */
609 	self->ul_async_safe = 1;
610 
611 	/* per-thread copies of global variables, for speed */
612 	ulwp->ul_queue_fifo = self->ul_queue_fifo;
613 	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
614 	ulwp->ul_error_detection = self->ul_error_detection;
615 	ulwp->ul_async_safe = self->ul_async_safe;
616 	ulwp->ul_max_spinners = self->ul_max_spinners;
617 	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
618 	ulwp->ul_queue_spin = self->ul_queue_spin;
619 	ulwp->ul_door_noreserve = self->ul_door_noreserve;
620 	ulwp->ul_misaligned = self->ul_misaligned;
621 
622 	/* new thread inherits creating thread's scheduling parameters */
623 	ulwp->ul_policy = self->ul_policy;
624 	ulwp->ul_pri = (self->ul_epri? self->ul_epri : self->ul_pri);
625 	ulwp->ul_cid = self->ul_cid;
626 	ulwp->ul_rtclassid = self->ul_rtclassid;
627 
628 	ulwp->ul_primarymap = self->ul_primarymap;
629 	ulwp->ul_self = ulwp;
630 	ulwp->ul_uberdata = udp;
631 
632 	/* debugger support */
633 	ulwp->ul_usropts = flags;
634 
635 #ifdef __sparc
636 	/*
637 	 * We cache several instructions in the thread structure for use
638 	 * by the fasttrap DTrace provider. When changing this, read the
639  * comment in fasttrap.h for all the other places that must
640 	 * be changed.
641 	 */
642 	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
643 	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
644 	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
645 	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
646 #endif
647 
648 	ulwp->ul_startpc = func;
649 	ulwp->ul_startarg = arg;
650 	_fpinherit(ulwp);
651 	/*
652 	 * Defer signals on the new thread until its TLS constructors
653 	 * have been called.  _thrp_setup() will call sigon() after
654 	 * it has called tls_setup().
655 	 */
656 	ulwp->ul_sigdefer = 1;
657 
658 	error = setup_context(&uc, _thrp_setup, ulwp,
659 	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize);
660 	if (error != 0 && stk != NULL)	/* inaccessible stack */
661 		error = EFAULT;
662 
663 	/*
664 	 * Call enter_critical() to avoid being suspended until we
665 	 * have linked the new thread into the proper lists.
666 	 * This is necessary because forkall() and fork1() must
667 	 * suspend all threads and they must see a complete list.
668 	 */
669 	enter_critical(self);
670 	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
671 	if (error != 0 ||
672 	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
673 		exit_critical(self);
674 		ulwp->ul_lwpid = (lwpid_t)(-1);
675 		ulwp->ul_dead = 1;
676 		ulwp->ul_detached = 1;
677 		lmutex_lock(&udp->link_lock);
678 		ulwp_free(ulwp);
679 		lmutex_unlock(&udp->link_lock);
680 		return (error);
681 	}
682 	self->ul_nocancel = 0;	/* cancellation is now possible */
683 	udp->uberflags.uf_mt = 1;
684 	if (new_thread)
685 		*new_thread = tid;
686 	if (flags & THR_DETACHED)
687 		ulwp->ul_detached = 1;
688 	ulwp->ul_lwpid = tid;
689 	ulwp->ul_stop = TSTP_REGULAR;
690 	if (flags & THR_SUSPENDED)
691 		ulwp->ul_created = 1;
692 
693 	lmutex_lock(&udp->link_lock);
694 	ulwp->ul_forw = udp->all_lwps;
695 	ulwp->ul_back = udp->all_lwps->ul_back;
696 	ulwp->ul_back->ul_forw = ulwp;
697 	ulwp->ul_forw->ul_back = ulwp;
698 	hash_in(ulwp, udp);
699 	udp->nthreads++;
700 	if (flags & THR_DAEMON)
701 		udp->ndaemons++;
702 	if (flags & THR_NEW_LWP)
703 		thr_concurrency++;
704 	__libc_threaded = 1;		/* inform stdio */
705 	lmutex_unlock(&udp->link_lock);
706 
707 	if (__td_event_report(self, TD_CREATE, udp)) {
708 		self->ul_td_evbuf.eventnum = TD_CREATE;
709 		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
710 		tdb_event(TD_CREATE, udp);
711 	}
712 
713 	exit_critical(self);
714 
715 	if (!(flags & THR_SUSPENDED))
716 		(void) _thrp_continue(tid, TSTP_REGULAR);
717 
718 	return (0);
719 }
720 
721 int
722 thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
723     long flags, thread_t *new_thread)
724 {
725 	return (_thrp_create(stk, stksize, func, arg, flags, new_thread, 0));
726 }
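
/*
 * A minimal usage sketch for the public interface above (assumed
 * application code, not part of libc): a NULL stack with size 0 lets
 * find_stack() choose the defaults, and THR_DETACHED creates a thread
 * that cannot be joined.
 */
#if 0
static void *
worker(void *arg)
{
	/* ... do work ... */
	return (arg);
}

int
spawn_worker(void)
{
	thread_t tid;

	return (thr_create(NULL, 0, worker, NULL, THR_DETACHED, &tid));
}
#endif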
727 
728 /*
729  * A special cancellation cleanup hook for DCE.
730  * cleanuphndlr, when it is not NULL, will contain a callback
731  * function to be called before a thread is terminated in
732  * thr_exit() as a result of being cancelled.
733  */
734 static void (*cleanuphndlr)(void) = NULL;
735 
736 /*
737  * _pthread_setcleanupinit: sets the cleanup hook.
738  */
739 int
740 _pthread_setcleanupinit(void (*func)(void))
741 {
742 	cleanuphndlr = func;
743 	return (0);
744 }
745 
746 void
747 _thrp_exit()
748 {
749 	ulwp_t *self = curthread;
750 	uberdata_t *udp = self->ul_uberdata;
751 	ulwp_t *replace = NULL;
752 
753 	if (__td_event_report(self, TD_DEATH, udp)) {
754 		self->ul_td_evbuf.eventnum = TD_DEATH;
755 		tdb_event(TD_DEATH, udp);
756 	}
757 
758 	ASSERT(self->ul_sigdefer != 0);
759 
760 	lmutex_lock(&udp->link_lock);
761 	udp->nthreads--;
762 	if (self->ul_usropts & THR_NEW_LWP)
763 		thr_concurrency--;
764 	if (self->ul_usropts & THR_DAEMON)
765 		udp->ndaemons--;
766 	else if (udp->nthreads == udp->ndaemons) {
767 		/*
768 		 * We are the last non-daemon thread exiting.
769 		 * Exit the process.  We retain our TSD and TLS so
770 		 * that atexit() application functions can use them.
771 		 */
772 		lmutex_unlock(&udp->link_lock);
773 		exit(0);
774 		thr_panic("_thrp_exit(): exit(0) returned");
775 	}
776 	lmutex_unlock(&udp->link_lock);
777 
778 	tmem_exit();		/* deallocate tmem allocations */
779 	tsd_exit();		/* deallocate thread-specific data */
780 	tls_exit();		/* deallocate thread-local storage */
781 	heldlock_exit();	/* deal with left-over held locks */
782 
783 	/* block all signals to finish exiting */
784 	block_all_signals(self);
785 	/* also prevent ourself from being suspended */
786 	enter_critical(self);
787 	rwl_free(self);
788 	lmutex_lock(&udp->link_lock);
789 	ulwp_free(self);
790 	(void) ulwp_lock(self, udp);
791 
792 	if (self->ul_mapsiz && !self->ul_detached) {
793 		/*
794 		 * We want to free the stack for reuse but must keep
795 		 * the ulwp_t struct for the benefit of thr_join().
796 		 * For this purpose we allocate a replacement ulwp_t.
797 		 */
798 		if ((replace = udp->ulwp_replace_free) == NULL)
799 			replace = lmalloc(REPLACEMENT_SIZE);
800 		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
801 			udp->ulwp_replace_last = NULL;
802 	}
803 
804 	if (udp->all_lwps == self)
805 		udp->all_lwps = self->ul_forw;
806 	if (udp->all_lwps == self)
807 		udp->all_lwps = NULL;
808 	else {
809 		self->ul_forw->ul_back = self->ul_back;
810 		self->ul_back->ul_forw = self->ul_forw;
811 	}
812 	self->ul_forw = self->ul_back = NULL;
813 #if defined(THREAD_DEBUG)
814 	/* collect queue lock statistics before marking ourself dead */
815 	record_spin_locks(self);
816 #endif
817 	self->ul_dead = 1;
818 	self->ul_pleasestop = 0;
819 	if (replace != NULL) {
820 		int ix = self->ul_ix;		/* the hash index */
821 		(void) memcpy(replace, self, REPLACEMENT_SIZE);
822 		replace->ul_self = replace;
823 		replace->ul_next = NULL;	/* clone not on stack list */
824 		replace->ul_mapsiz = 0;		/* allows clone to be freed */
825 		replace->ul_replace = 1;	/* requires clone to be freed */
826 		hash_out_unlocked(self, ix, udp);
827 		hash_in_unlocked(replace, ix, udp);
828 		ASSERT(!(self->ul_detached));
829 		self->ul_detached = 1;		/* this frees the stack */
830 		self->ul_schedctl = NULL;
831 		self->ul_schedctl_called = &udp->uberflags;
832 		set_curthread(self = replace);
833 		/*
834 		 * Having just changed the address of curthread, we
835 		 * must reset the ownership of the locks we hold so
836 		 * that assertions will not fire when we release them.
837 		 */
838 		udp->link_lock.mutex_owner = (uintptr_t)self;
839 		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
840 		/*
841 		 * NOTE:
842 		 * On i386, %gs still references the original, not the
843 		 * replacement, ulwp structure.  Fetching the replacement
844 		 * curthread pointer via %gs:0 works correctly since the
845 		 * original ulwp structure will not be reallocated until
846 		 * this lwp has completed its lwp_exit() system call (see
847 		 * dead_and_buried()), but from here on out, we must make
848 		 * no references to %gs:<offset> other than %gs:0.
849 		 */
850 	}
851 	/*
852 	 * Put non-detached terminated threads in the all_zombies list.
853 	 */
854 	if (!self->ul_detached) {
855 		udp->nzombies++;
856 		if (udp->all_zombies == NULL) {
857 			ASSERT(udp->nzombies == 1);
858 			udp->all_zombies = self->ul_forw = self->ul_back = self;
859 		} else {
860 			self->ul_forw = udp->all_zombies;
861 			self->ul_back = udp->all_zombies->ul_back;
862 			self->ul_back->ul_forw = self;
863 			self->ul_forw->ul_back = self;
864 		}
865 	}
866 	/*
867 	 * Notify everyone waiting for this thread.
868 	 */
869 	ulwp_broadcast(self);
870 	(void) ulwp_unlock(self, udp);
871 	/*
872 	 * Prevent any more references to the schedctl data.
873 	 * We are exiting and continue_fork() may not find us.
874 	 * Do this just before dropping link_lock, since fork
875 	 * serializes on link_lock.
876 	 */
877 	self->ul_schedctl = NULL;
878 	self->ul_schedctl_called = &udp->uberflags;
879 	lmutex_unlock(&udp->link_lock);
880 
881 	ASSERT(self->ul_critical == 1);
882 	ASSERT(self->ul_preempt == 0);
883 	_lwp_terminate();	/* never returns */
884 	thr_panic("_thrp_exit(): _lwp_terminate() returned");
885 }
886 
887 #if defined(THREAD_DEBUG)
888 void
889 collect_queue_statistics()
890 {
891 	uberdata_t *udp = curthread->ul_uberdata;
892 	ulwp_t *ulwp;
893 
894 	if (thread_queue_dump) {
895 		lmutex_lock(&udp->link_lock);
896 		if ((ulwp = udp->all_lwps) != NULL) {
897 			do {
898 				record_spin_locks(ulwp);
899 			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
900 		}
901 		lmutex_unlock(&udp->link_lock);
902 	}
903 }
904 #endif
905 
906 static void __NORETURN
907 _thrp_exit_common(void *status, int unwind)
908 {
909 	ulwp_t *self = curthread;
910 	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);
911 
912 	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);
913 
914 	/*
915 	 * Disable cancellation and call the special DCE cancellation
916 	 * cleanup hook if it is enabled.  Do nothing else before calling
917 	 * the DCE cancellation cleanup hook; it may call longjmp() and
918 	 * never return here.
919 	 */
920 	self->ul_cancel_disabled = 1;
921 	self->ul_cancel_async = 0;
922 	self->ul_save_async = 0;
923 	self->ul_cancelable = 0;
924 	self->ul_cancel_pending = 0;
925 	set_cancel_pending_flag(self, 1);
926 	if (cancelled && cleanuphndlr != NULL)
927 		(*cleanuphndlr)();
928 
929 	/*
930 	 * Block application signals while we are exiting.
931 	 * We call out to C++, TSD, and TLS destructors while exiting
932 	 * and these are application-defined, so we cannot be assured
933 	 * that they won't reset the signal mask.  We use sigoff() to
934 	 * defer any signals that may be received as a result of this
935 	 * bad behavior.  Such signals will be lost to the process
936 	 * when the thread finishes exiting.
937 	 */
938 	(void) thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
939 	sigoff(self);
940 
941 	self->ul_rval = status;
942 
943 	/*
944 	 * If thr_exit is being called from the places where
945 	 * C++ destructors are to be called such as cancellation
946 	 * points, then set this flag. It is checked in _t_cancel()
947 	 * to decide whether _ex_unwind() is to be called or not.
948 	 */
949 	if (unwind)
950 		self->ul_unwind = 1;
951 
952 	/*
953 	 * _thrp_unwind() will eventually call _thrp_exit().
954 	 * It never returns.
955 	 */
956 	_thrp_unwind(NULL);
957 	thr_panic("_thrp_exit_common(): _thrp_unwind() returned");
958 
959 	for (;;)	/* to shut the compiler up about __NORETURN */
960 		continue;
961 }
962 
963 /*
964  * Called when a thread returns from its start function.
965  * We are at the top of the stack; no unwinding is necessary.
966  */
967 void
968 _thrp_terminate(void *status)
969 {
970 	_thrp_exit_common(status, 0);
971 }
972 
973 #pragma weak pthread_exit = thr_exit
974 #pragma weak _thr_exit = thr_exit
975 void
976 thr_exit(void *status)
977 {
978 	_thrp_exit_common(status, 1);
979 }
980 
981 int
982 _thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
983 {
984 	uberdata_t *udp = curthread->ul_uberdata;
985 	mutex_t *mp;
986 	void *rval;
987 	thread_t found;
988 	ulwp_t *ulwp;
989 	ulwp_t **ulwpp;
990 	int replace;
991 	int error;
992 
993 	if (do_cancel)
994 		error = lwp_wait(tid, &found);
995 	else {
996 		while ((error = __lwp_wait(tid, &found)) == EINTR)
997 			;
998 	}
999 	if (error)
1000 		return (error);
1001 
1002 	/*
1003 	 * We must hold link_lock to avoid a race condition with find_stack().
1004 	 */
1005 	lmutex_lock(&udp->link_lock);
1006 	if ((ulwpp = find_lwpp(found)) == NULL) {
1007 		/*
1008 		 * lwp_wait() found an lwp that the library doesn't know
1009 		 * about.  It must have been created with _lwp_create().
1010 		 * Just return its lwpid; we can't know its status.
1011 		 */
1012 		lmutex_unlock(&udp->link_lock);
1013 		rval = NULL;
1014 	} else {
1015 		/*
1016 		 * Remove ulwp from the hash table.
1017 		 */
1018 		ulwp = *ulwpp;
1019 		*ulwpp = ulwp->ul_hash;
1020 		ulwp->ul_hash = NULL;
1021 		/*
1022 		 * Remove ulwp from all_zombies list.
1023 		 */
1024 		ASSERT(udp->nzombies >= 1);
1025 		if (udp->all_zombies == ulwp)
1026 			udp->all_zombies = ulwp->ul_forw;
1027 		if (udp->all_zombies == ulwp)
1028 			udp->all_zombies = NULL;
1029 		else {
1030 			ulwp->ul_forw->ul_back = ulwp->ul_back;
1031 			ulwp->ul_back->ul_forw = ulwp->ul_forw;
1032 		}
1033 		ulwp->ul_forw = ulwp->ul_back = NULL;
1034 		udp->nzombies--;
1035 		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
1036 		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
1037 		/*
1038 		 * We can't call ulwp_unlock(ulwp) after we set
1039 		 * ulwp->ul_ix = -1 so we have to get a pointer to the
1040 		 * ulwp's hash table mutex now in order to unlock it below.
1041 		 */
1042 		mp = ulwp_mutex(ulwp, udp);
1043 		ulwp->ul_lwpid = (lwpid_t)(-1);
1044 		ulwp->ul_ix = -1;
1045 		rval = ulwp->ul_rval;
1046 		replace = ulwp->ul_replace;
1047 		lmutex_unlock(mp);
1048 		if (replace) {
1049 			ulwp->ul_next = NULL;
1050 			if (udp->ulwp_replace_free == NULL)
1051 				udp->ulwp_replace_free =
1052 				    udp->ulwp_replace_last = ulwp;
1053 			else {
1054 				udp->ulwp_replace_last->ul_next = ulwp;
1055 				udp->ulwp_replace_last = ulwp;
1056 			}
1057 		}
1058 		lmutex_unlock(&udp->link_lock);
1059 	}
1060 
1061 	if (departed != NULL)
1062 		*departed = found;
1063 	if (status != NULL)
1064 		*status = rval;
1065 	return (0);
1066 }
1067 
1068 int
1069 thr_join(thread_t tid, thread_t *departed, void **status)
1070 {
1071 	int error = _thrp_join(tid, departed, status, 1);
1072 	return ((error == EINVAL)? ESRCH : error);
1073 }
1074 
1075 /*
1076  * pthread_join() differs from Solaris thr_join():
1077  * It does not return the departed thread's id
1078  * and hence does not have a "departed" argument.
1079  * It returns EINVAL if tid refers to a detached thread.
1080  */
1081 #pragma weak _pthread_join = pthread_join
1082 int
1083 pthread_join(pthread_t tid, void **status)
1084 {
1085 	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
1086 }
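
/*
 * Usage sketch contrasting the two join interfaces above (assumed
 * application code): thr_join() with tid 0 waits for any joinable
 * thread and reports which one departed; pthread_join() targets a
 * specific thread and has no "departed" argument.
 */
#if 0
	thread_t departed;
	void *status;

	/* Solaris threads: join whichever joinable thread exits next */
	(void) thr_join(0, &departed, &status);

	/* POSIX threads: join one specific thread */
	(void) pthread_join(tid, &status);
#endif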
1087 
1088 int
1089 pthread_detach(pthread_t tid)
1090 {
1091 	uberdata_t *udp = curthread->ul_uberdata;
1092 	ulwp_t *ulwp;
1093 	ulwp_t **ulwpp;
1094 	int error = 0;
1095 
1096 	if ((ulwpp = find_lwpp(tid)) == NULL)
1097 		return (ESRCH);
1098 	ulwp = *ulwpp;
1099 
1100 	if (ulwp->ul_dead) {
1101 		ulwp_unlock(ulwp, udp);
1102 		error = _thrp_join(tid, NULL, NULL, 0);
1103 	} else {
1104 		error = __lwp_detach(tid);
1105 		ulwp->ul_detached = 1;
1106 		ulwp->ul_usropts |= THR_DETACHED;
1107 		ulwp_unlock(ulwp, udp);
1108 	}
1109 	return (error);
1110 }
1111 
1112 static const char *
1113 ematch(const char *ev, const char *match)
1114 {
1115 	int c;
1116 
1117 	while ((c = *match++) != '\0') {
1118 		if (*ev++ != c)
1119 			return (NULL);
1120 	}
1121 	if (*ev++ != '=')
1122 		return (NULL);
1123 	return (ev);
1124 }
1125 
1126 static int
1127 envvar(const char *ev, const char *match, int limit)
1128 {
1129 	int val = -1;
1130 	const char *ename;
1131 
1132 	if ((ename = ematch(ev, match)) != NULL) {
1133 		int c;
1134 		for (val = 0; (c = *ename) != '\0'; ename++) {
1135 			if (!isdigit(c)) {
1136 				val = -1;
1137 				break;
1138 			}
1139 			val = val * 10 + (c - '0');
1140 			if (val > limit) {
1141 				val = limit;
1142 				break;
1143 			}
1144 		}
1145 	}
1146 	return (val);
1147 }
1148 
1149 static void
1150 etest(const char *ev)
1151 {
1152 	int value;
1153 
1154 	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
1155 		thread_queue_spin = value;
1156 	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
1157 		thread_adaptive_spin = value;
1158 	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
1159 		thread_max_spinners = value;
1160 	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
1161 		thread_queue_fifo = value;
1162 #if defined(THREAD_DEBUG)
1163 	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
1164 		thread_queue_verify = value;
1165 	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
1166 		thread_queue_dump = value;
1167 #endif
1168 	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
1169 		thread_stack_cache = value;
1170 	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
1171 		thread_cond_wait_defer = value;
1172 	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
1173 		thread_error_detection = value;
1174 	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
1175 		thread_async_safe = value;
1176 	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
1177 		thread_door_noreserve = value;
1178 	if ((value = envvar(ev, "LOCKS_MISALIGNED", 1)) >= 0)
1179 		thread_locks_misaligned = value;
1180 }
1181 
1182 /*
1183  * Look for and evaluate environment variables of the form "_THREAD_*".
1184  * For compatibility with the past, we also look for environment
1185  * names of the form "LIBTHREAD_*".
1186  */
1187 static void
1188 set_thread_vars()
1189 {
1190 	extern const char **_environ;
1191 	const char **pev;
1192 	const char *ev;
1193 	char c;
1194 
1195 	if ((pev = _environ) == NULL)
1196 		return;
1197 	while ((ev = *pev++) != NULL) {
1198 		c = *ev;
1199 		if (c == '_' && strncmp(ev, "_THREAD_", 8) == 0)
1200 			etest(ev + 8);
1201 		if (c == 'L' && strncmp(ev, "LIBTHREAD_", 10) == 0)
1202 			etest(ev + 10);
1203 	}
1204 }
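
/*
 * Example of the tunables parsed above, as they might appear in the
 * environment (values are illustrative; each is clamped to the limit
 * passed to envvar()):
 *
 *	_THREAD_STACK_CACHE=20		sets thread_stack_cache = 20
 *	LIBTHREAD_QUEUE_SPIN=5000	sets thread_queue_spin = 5000
 */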
1205 
1206 /* PROBE_SUPPORT begin */
1207 #pragma weak __tnf_probe_notify
1208 extern void __tnf_probe_notify(void);
1209 /* PROBE_SUPPORT end */
1210 
1211 /* same as atexit() but private to the library */
1212 extern int _atexit(void (*)(void));
1213 
1214 /* same as _cleanup() but private to the library */
1215 extern void __cleanup(void);
1216 
1217 extern void atfork_init(void);
1218 
1219 #ifdef __amd64
1220 extern void __proc64id(void);
1221 #endif
1222 
1223 /*
1224  * libc_init() is called by ld.so.1 for library initialization.
1225  * We perform minimal initialization; enough to work with the main thread.
1226  */
1227 void
1228 libc_init(void)
1229 {
1230 	uberdata_t *udp = &__uberdata;
1231 	ulwp_t *oldself = __curthread();
1232 	ucontext_t uc;
1233 	ulwp_t *self;
1234 	struct rlimit rl;
1235 	caddr_t data;
1236 	size_t tls_size;
1237 	int setmask;
1238 
1239 	/*
1240 	 * For the initial stage of initialization, we must be careful
1241 	 * not to call any function that could possibly call _cerror().
1242 	 * For this purpose, we call only the raw system call wrappers.
1243 	 */
1244 
1245 #ifdef __amd64
1246 	/*
1247 	 * Gather information about cache layouts for optimized
1248 	 * AMD and Intel assembler strfoo() and memfoo() functions.
1249 	 */
1250 	__proc64id();
1251 #endif
1252 
1253 	/*
1254 	 * Every libc, regardless of which link map, must register __cleanup().
1255 	 */
1256 	(void) _atexit(__cleanup);
1257 
1258 	/*
1259 	 * We keep our uberdata on one of (a) the first alternate link map
1260 	 * or (b) the primary link map.  We switch to the primary link map
1261 	 * and stay there once we see it.  All intermediate link maps are
1262 	 * subject to being unloaded at any time.
1263 	 */
1264 	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
1265 		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
1266 		mutex_setup();
1267 		atfork_init();	/* every link map needs atfork() processing */
1268 		init_progname();
1269 		return;
1270 	}
1271 
1272 	/*
1273 	 * To establish the main stack information, we have to get our context.
1274 	 * This is also convenient to use for getting our signal mask.
1275 	 */
1276 	uc.uc_flags = UC_ALL;
1277 	(void) __getcontext(&uc);
1278 	ASSERT(uc.uc_link == NULL);
1279 
1280 	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
1281 	ASSERT(primary_link_map || tls_size == 0);
1282 	data = lmalloc(sizeof (ulwp_t) + tls_size);
1283 	if (data == NULL)
1284 		thr_panic("cannot allocate thread structure for main thread");
1285 	/* LINTED pointer cast may result in improper alignment */
1286 	self = (ulwp_t *)(data + tls_size);
1287 	init_hash_table[0].hash_bucket = self;
1288 
1289 	self->ul_sigmask = uc.uc_sigmask;
1290 	delete_reserved_signals(&self->ul_sigmask);
1291 	/*
1292 	 * Are the old and new sets different?
1293 	 * (This can happen if we are currently blocking SIGCANCEL.)
1294 	 * If so, we must explicitly set our signal mask, below.
1295 	 */
1296 	setmask =
1297 	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
1298 	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]) |
1299 	    (self->ul_sigmask.__sigbits[2] ^ uc.uc_sigmask.__sigbits[2]) |
1300 	    (self->ul_sigmask.__sigbits[3] ^ uc.uc_sigmask.__sigbits[3]));
1301 
1302 #ifdef __sparc
1303 	/*
1304 	 * We cache several instructions in the thread structure for use
1305 	 * by the fasttrap DTrace provider. When changing this, read the
1306  * comment in fasttrap.h for all the other places that must
1307 	 * be changed.
1308 	 */
1309 	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
1310 	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
1311 	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
1312 	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
1313 #endif
1314 
1315 	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
1316 	(void) getrlimit(RLIMIT_STACK, &rl);
1317 	self->ul_stksiz = rl.rlim_cur;
1318 	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);
1319 
1320 	self->ul_forw = self->ul_back = self;
1321 	self->ul_hash = NULL;
1322 	self->ul_ix = 0;
1323 	self->ul_lwpid = 1; /* _lwp_self() */
1324 	self->ul_main = 1;
1325 	self->ul_self = self;
1326 	self->ul_policy = -1;		/* initialize only when needed */
1327 	self->ul_pri = 0;
1328 	self->ul_cid = 0;
1329 	self->ul_rtclassid = -1;
1330 	self->ul_uberdata = udp;
1331 	if (oldself != NULL) {
1332 		int i;
1333 
1334 		ASSERT(primary_link_map);
1335 		ASSERT(oldself->ul_main == 1);
1336 		self->ul_stsd = oldself->ul_stsd;
1337 		for (i = 0; i < TSD_NFAST; i++)
1338 			self->ul_ftsd[i] = oldself->ul_ftsd[i];
1339 		self->ul_tls = oldself->ul_tls;
1340 		/*
1341 		 * Retrieve all pointers to uberdata allocated
1342 		 * while running on previous link maps.
1343 		 * We would like to do a structure assignment here, but
1344 		 * gcc turns structure assignments into calls to memcpy(),
1345 		 * a function exported from libc.  We can't call any such
1346 		 * external functions until we establish curthread, below,
1347 		 * so we just call our private version of memcpy().
1348 		 */
1349 		(void) memcpy(udp, oldself->ul_uberdata, sizeof (*udp));
1350 		/*
1351 		 * These items point to global data on the primary link map.
1352 		 */
1353 		udp->thr_hash_table = init_hash_table;
1354 		udp->sigacthandler = sigacthandler;
1355 		udp->tdb.tdb_events = tdb_events;
1356 		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
1357 		ASSERT(udp->lwp_stacks == NULL);
1358 		ASSERT(udp->ulwp_freelist == NULL);
1359 		ASSERT(udp->ulwp_replace_free == NULL);
1360 		ASSERT(udp->hash_size == 1);
1361 	}
1362 	udp->all_lwps = self;
1363 	udp->ulwp_one = self;
1364 	udp->pid = getpid();
1365 	udp->nthreads = 1;
1366 	/*
1367 	 * In every link map, tdb_bootstrap points to the same piece of
1368 	 * allocated memory.  When the primary link map is initialized,
1369 	 * the allocated memory is assigned a pointer to the one true
1370 	 * uberdata.  This allows libc_db to initialize itself regardless
1371 	 * of which instance of libc it finds in the address space.
1372 	 */
1373 	if (udp->tdb_bootstrap == NULL)
1374 		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
1375 	__tdb_bootstrap = udp->tdb_bootstrap;
1376 	if (primary_link_map) {
1377 		self->ul_primarymap = 1;
1378 		udp->primary_map = 1;
1379 		*udp->tdb_bootstrap = udp;
1380 	}
1381 	/*
1382 	 * Cancellation can't happen until:
1383 	 *	pthread_cancel() is called
1384 	 * or:
1385 	 *	another thread is created
1386 	 * For now, as a single-threaded process, set the flag that tells
1387 	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
1388 	 */
1389 	self->ul_nocancel = 1;
1390 
1391 #if defined(__amd64)
1392 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
1393 #elif defined(__i386)
1394 	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
1395 #endif	/* __i386 || __amd64 */
1396 	set_curthread(self);		/* redundant on i386 */
1397 	/*
1398 	 * Now curthread is established and it is safe to call any
1399 	 * function in libc except one that uses thread-local storage.
1400 	 */
1401 	self->ul_errnop = &errno;
1402 	if (oldself != NULL) {
1403 		/* tls_size was zero when oldself was allocated */
1404 		lfree(oldself, sizeof (ulwp_t));
1405 	}
1406 	mutex_setup();
1407 	atfork_init();
1408 	signal_init();
1409 
1410 	/*
1411 	 * If the stack is unlimited, we set the size to zero to disable
1412 	 * stack checking.
1413 	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
1414 	 */
1415 	if (self->ul_stksiz == RLIM_INFINITY) {
1416 		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
1417 		self->ul_ustack.ss_size = 0;
1418 	} else {
1419 		self->ul_ustack.ss_sp = self->ul_stk;
1420 		self->ul_ustack.ss_size = self->ul_stksiz;
1421 	}
1422 	self->ul_ustack.ss_flags = 0;
1423 	(void) setustack(&self->ul_ustack);
1424 
1425 	/*
1426 	 * Get the variables that affect thread behavior from the environment.
1427 	 */
1428 	set_thread_vars();
1429 	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
1430 	udp->thread_stack_cache = thread_stack_cache;
1431 
1432 	/*
1433 	 * Make per-thread copies of global variables, for speed.
1434 	 */
1435 	self->ul_queue_fifo = (char)thread_queue_fifo;
1436 	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
1437 	self->ul_error_detection = (char)thread_error_detection;
1438 	self->ul_async_safe = (char)thread_async_safe;
1439 	self->ul_door_noreserve = (char)thread_door_noreserve;
1440 	self->ul_misaligned = (char)thread_locks_misaligned;
1441 	self->ul_max_spinners = (uint8_t)thread_max_spinners;
1442 	self->ul_adaptive_spin = thread_adaptive_spin;
1443 	self->ul_queue_spin = thread_queue_spin;
1444 
1445 #if defined(__sparc) && !defined(_LP64)
1446 	if (self->ul_misaligned) {
1447 		/*
1448 		 * Tell the kernel to fix up ldx/stx instructions that
1449 		 * refer to non-8-byte aligned data instead of giving
1450 		 * the process an alignment trap and generating SIGBUS.
1451 		 *
1452 		 * Programs compiled for 32-bit sparc with the Studio SS12
1453 		 * compiler get this done for them automatically (in _init()).
1454 		 * We do it here for the benefit of programs compiled with
1455 		 * other compilers, like gcc.
1456 		 *
1457 		 * This is necessary for the horrible _THREAD_LOCKS_MISALIGNED=1
1458 		 * environment variable hack to work.
1459 		 */
1460 		extern void _do_fix_align(void);
1461 		_do_fix_align();
1462 	}
1463 #endif
1464 
1465 	/*
1466 	 * When we have initialized the primary link map, inform
1467 	 * the dynamic linker about our interface functions.
1468 	 * Set up our pointer to the program name.
1469 	 */
1470 	if (self->ul_primarymap)
1471 		_ld_libc((void *)rtld_funcs);
1472 	init_progname();
1473 
1474 	/*
1475 	 * Defer signals until TLS constructors have been called.
1476 	 */
1477 	sigoff(self);
1478 	tls_setup();
1479 	sigon(self);
1480 	if (setmask)
1481 		(void) restore_signals(self);
1482 
1483 	/*
1484 	 * Make private copies of __xpg4 and __xpg6 so libc can test
1485 	 * them after this point without invoking the dynamic linker.
1486 	 */
1487 	libc__xpg4 = __xpg4;
1488 	libc__xpg6 = __xpg6;
1489 
1490 	/* PROBE_SUPPORT begin */
1491 	if (self->ul_primarymap && __tnf_probe_notify != NULL)
1492 		__tnf_probe_notify();
1493 	/* PROBE_SUPPORT end */
1494 
1495 	init_sigev_thread();
1496 	init_aio();
1497 
1498 	/*
1499 	 * We need to reset __threaded dynamically at runtime: a reference
1500 	 * to __threaded bound outside libc may not have the initial value
1501 	 * of 1 (there may be no copy relocation in a.out to supply it).
1502 	 */
1503 	__threaded = 1;
1504 }
1505 
1506 #pragma fini(libc_fini)
1507 void
1508 libc_fini()
1509 {
1510 	/*
1511 	 * If we are doing fini processing for the instance of libc
1512 	 * on the first alternate link map (this happens only when
1513 	 * the dynamic linker rejects a bad audit library), then clear
1514 	 * __curthread().  We abandon whatever memory was allocated by
1515 	 * lmalloc() while running on this alternate link-map but we
1516 	 * don't care (and can't find the memory in any case); we just
1517 	 * want to protect the application from this bad audit library.
1518 	 * No fini processing is done by libc in the normal case.
1519 	 */
1520 
1521 	uberdata_t *udp = curthread->ul_uberdata;
1522 
1523 	if (udp->primary_map == 0 && udp == &__uberdata)
1524 		set_curthread(NULL);
1525 }
1526 
1527 /*
1528  * finish_init is called when we are about to become multi-threaded,
1529  * that is, on the first call to thr_create().
1530  */
1531 void
1532 finish_init()
1533 {
1534 	ulwp_t *self = curthread;
1535 	uberdata_t *udp = self->ul_uberdata;
1536 	thr_hash_table_t *htp;
1537 	void *data;
1538 	int i;
1539 
1540 	/*
1541 	 * No locks needed here; we are single-threaded on the first call.
1542 	 * We can be called only after the primary link map has been set up.
1543 	 */
1544 	ASSERT(self->ul_primarymap);
1545 	ASSERT(self == udp->ulwp_one);
1546 	ASSERT(!udp->uberflags.uf_mt);
1547 	ASSERT(udp->hash_size == 1);
1548 
1549 	/*
1550 	 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
1551 	 */
1552 	update_sched(self);
1553 
1554 	/*
1555 	 * Allocate the queue_head array if not already allocated.
1556 	 */
1557 	if (udp->queue_head == NULL)
1558 		queue_alloc();
1559 
1560 	/*
1561 	 * Now allocate the thread hash table.
1562 	 */
1563 	if ((data = mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
1564 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
1565 	    == MAP_FAILED)
1566 		thr_panic("cannot allocate thread hash table");
1567 
1568 	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
1569 	udp->hash_size = HASHTBLSZ;
1570 	udp->hash_mask = HASHTBLSZ - 1;
1571 
1572 	for (i = 0; i < HASHTBLSZ; i++, htp++) {
1573 		htp->hash_lock.mutex_flag = LOCK_INITED;
1574 		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
1575 		htp->hash_cond.cond_magic = COND_MAGIC;
1576 	}
1577 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1578 
1579 	/*
1580 	 * Set up the SIGCANCEL handler for thread cancellation.
1581 	 */
1582 	setup_cancelsig(SIGCANCEL);
1583 
1584 	/*
1585 	 * Arrange to do special things on exit --
1586 	 * - collect queue statistics from all remaining active threads.
1587 	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
1588 	 * - grab assert_lock to ensure that assertion failures
1589 	 *   and a core dump take precedence over _exit().
1590 	 * (Functions are called in the reverse order of their registration.)
1591 	 */
1592 	(void) _atexit(grab_assert_lock);
1593 #if defined(THREAD_DEBUG)
1594 	(void) _atexit(dump_queue_statistics);
1595 	(void) _atexit(collect_queue_statistics);
1596 #endif
1597 }
1598 
1599 /*
1600  * Used only by postfork1_child(), below.
1601  */
1602 static void
1603 mark_dead_and_buried(ulwp_t *ulwp)
1604 {
1605 	ulwp->ul_dead = 1;
1606 	ulwp->ul_lwpid = (lwpid_t)(-1);
1607 	ulwp->ul_hash = NULL;
1608 	ulwp->ul_ix = -1;
1609 	ulwp->ul_schedctl = NULL;
1610 	ulwp->ul_schedctl_called = NULL;
1611 }
1612 
1613 /*
1614  * This is called from fork1() in the child.
1615  * Reset our data structures to reflect one lwp.
1616  */
1617 void
1618 postfork1_child()
1619 {
1620 	ulwp_t *self = curthread;
1621 	uberdata_t *udp = self->ul_uberdata;
1622 	queue_head_t *qp;
1623 	ulwp_t *next;
1624 	ulwp_t *ulwp;
1625 	int i;
1626 
1627 	/* daemon threads shouldn't call fork1(), but oh well... */
1628 	self->ul_usropts &= ~THR_DAEMON;
1629 	udp->nthreads = 1;
1630 	udp->ndaemons = 0;
1631 	udp->uberflags.uf_mt = 0;
1632 	__libc_threaded = 0;
1633 	for (i = 0; i < udp->hash_size; i++)
1634 		udp->thr_hash_table[i].hash_bucket = NULL;
1635 	self->ul_lwpid = _lwp_self();
1636 	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);
1637 
1638 	/*
1639 	 * Some thread in the parent might have been suspended
1640 	 * while holding udp->callout_lock or udp->ld_lock.
1641 	 * Reinitialize the child's copies.
1642 	 */
1643 	(void) mutex_init(&udp->callout_lock,
1644 	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
1645 	(void) mutex_init(&udp->ld_lock,
1646 	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
1647 
1648 	/* no one in the child is on a sleep queue; reinitialize */
1649 	if ((qp = udp->queue_head) != NULL) {
1650 		(void) memset(qp, 0, 2 * QHASHSIZE * sizeof (queue_head_t));
1651 		for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
1652 			qp->qh_type = (i < QHASHSIZE)? MX : CV;
1653 			qp->qh_lock.mutex_flag = LOCK_INITED;
1654 			qp->qh_lock.mutex_magic = MUTEX_MAGIC;
1655 			qp->qh_hlist = &qp->qh_def_root;
1656 #if defined(THREAD_DEBUG)
1657 			qp->qh_hlen = 1;
1658 			qp->qh_hmax = 1;
1659 #endif
1660 		}
1661 	}
1662 
1663 	/*
1664 	 * Do post-fork1 processing for subsystems that need it.
1665 	 * We need to do this before unmapping all of the abandoned
1666 	 * threads' stacks, below, because the post-fork1 actions
1667 	 * might require access to those stacks.
1668 	 */
1669 	postfork1_child_sigev_aio();
1670 	postfork1_child_sigev_mq();
1671 	postfork1_child_sigev_timer();
1672 	postfork1_child_aio();
1673 	/*
1674 	 * The above subsystems use thread pools, so this action
1675 	 * must be performed after those actions.
1676 	 */
1677 	postfork1_child_tpool();
1678 
1679 	/*
1680 	 * All lwps except ourself are gone.  Mark them so.
1681 	 * First mark all of the lwps that have already been freed.
1682 	 * Then mark and free all of the active lwps except ourself.
1683 	 * Since we are single-threaded, no locks are required here.
1684 	 */
1685 	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
1686 		mark_dead_and_buried(ulwp);
1687 	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
1688 		mark_dead_and_buried(ulwp);
1689 	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
1690 		next = ulwp->ul_forw;
1691 		ulwp->ul_forw = ulwp->ul_back = NULL;
1692 		mark_dead_and_buried(ulwp);
1693 		tsd_free(ulwp);
1694 		tls_free(ulwp);
1695 		rwl_free(ulwp);
1696 		heldlock_free(ulwp);
1697 		ulwp_free(ulwp);
1698 	}
1699 	self->ul_forw = self->ul_back = udp->all_lwps = self;
1700 	if (self != udp->ulwp_one)
1701 		mark_dead_and_buried(udp->ulwp_one);
1702 	if ((ulwp = udp->all_zombies) != NULL) {
1703 		ASSERT(udp->nzombies != 0);
1704 		do {
1705 			next = ulwp->ul_forw;
1706 			ulwp->ul_forw = ulwp->ul_back = NULL;
1707 			mark_dead_and_buried(ulwp);
1708 			udp->nzombies--;
1709 			if (ulwp->ul_replace) {
1710 				ulwp->ul_next = NULL;
1711 				if (udp->ulwp_replace_free == NULL) {
1712 					udp->ulwp_replace_free =
1713 					    udp->ulwp_replace_last = ulwp;
1714 				} else {
1715 					udp->ulwp_replace_last->ul_next = ulwp;
1716 					udp->ulwp_replace_last = ulwp;
1717 				}
1718 			}
1719 		} while ((ulwp = next) != udp->all_zombies);
1720 		ASSERT(udp->nzombies == 0);
1721 		udp->all_zombies = NULL;
1722 		udp->nzombies = 0;
1723 	}
1724 	trim_stack_cache(0);
1725 }
1726 
1727 lwpid_t
1728 lwp_self(void)
1729 {
1730 	return (curthread->ul_lwpid);
1731 }
1732 
1733 #pragma weak _ti_thr_self = thr_self
1734 #pragma weak pthread_self = thr_self
1735 thread_t
1736 thr_self()
1737 {
1738 	return (curthread->ul_lwpid);
1739 }
1740 
1741 int
1742 thr_main()
1743 {
1744 	ulwp_t *self = __curthread();
1745 
1746 	return ((self == NULL)? -1 : self->ul_main);
1747 }
1748 
1749 int
1750 _thrp_cancelled(void)
1751 {
1752 	return (curthread->ul_rval == PTHREAD_CANCELED);
1753 }
1754 
1755 int
1756 _thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
1757 {
1758 	stk->ss_sp = (void *)ulwp->ul_stktop;
1759 	stk->ss_size = ulwp->ul_stksiz;
1760 	stk->ss_flags = 0;
1761 	return (0);
1762 }
1763 
1764 #pragma weak _thr_stksegment = thr_stksegment
1765 int
1766 thr_stksegment(stack_t *stk)
1767 {
1768 	return (_thrp_stksegment(curthread, stk));
1769 }
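
/*
 * Usage sketch for thr_stksegment() above (assumed application code):
 * note that ss_sp is the top of the stack, not the base, so the
 * lowest valid address is ss_sp - ss_size.
 */
#if 0
	stack_t stk;

	if (thr_stksegment(&stk) == 0) {
		caddr_t base = (caddr_t)stk.ss_sp - stk.ss_size;
		/* the stack occupies [base, stk.ss_sp) */
	}
#endif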
1770 
1771 void
1772 force_continue(ulwp_t *ulwp)
1773 {
1774 #if defined(THREAD_DEBUG)
1775 	ulwp_t *self = curthread;
1776 	uberdata_t *udp = self->ul_uberdata;
1777 #endif
1778 	int error;
1779 	timespec_t ts;
1780 
1781 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1782 	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
1783 
1784 	for (;;) {
1785 		error = _lwp_continue(ulwp->ul_lwpid);
1786 		if (error != 0 && error != EINTR)
1787 			break;
1788 		error = 0;
1789 		if (ulwp->ul_stopping) {	/* he is stopping himself */
1790 			ts.tv_sec = 0;		/* give him a chance to run */
1791 			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
1792 			(void) __nanosleep(&ts, NULL);
1793 		}
1794 		if (!ulwp->ul_stopping)		/* he is running now */
1795 			break;			/* so we are done */
1796 		/*
1797 		 * He is marked as being in the process of stopping
1798 		 * himself.  Loop around and continue him again.
1799 		 * He may not have been stopped the first time.
1800 		 */
1801 	}
1802 }
1803 
1804 /*
1805  * Suspend an lwp with lwp_suspend(), then move it to a safe point,
1806  * that is, to a point where ul_critical and ul_rtld are both zero.
1807  * On return, the ulwp_lock() is dropped as with ulwp_unlock().
1808  * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
1809  * If we have to drop link_lock, we store 1 through link_dropped.
1810  * If the lwp exits before it can be suspended, we return ESRCH.
1811  */
1812 int
1813 safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
1814 {
1815 	ulwp_t *self = curthread;
1816 	uberdata_t *udp = self->ul_uberdata;
1817 	cond_t *cvp = ulwp_condvar(ulwp, udp);
1818 	mutex_t *mp = ulwp_mutex(ulwp, udp);
1819 	thread_t tid = ulwp->ul_lwpid;
1820 	int ix = ulwp->ul_ix;
1821 	int error = 0;
1822 
1823 	ASSERT(whystopped == TSTP_REGULAR ||
1824 	    whystopped == TSTP_MUTATOR ||
1825 	    whystopped == TSTP_FORK);
1826 	ASSERT(ulwp != self);
1827 	ASSERT(!ulwp->ul_stop);
1828 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1829 	ASSERT(MUTEX_OWNED(mp, self));
1830 
1831 	if (link_dropped != NULL)
1832 		*link_dropped = 0;
1833 
1834 	/*
1835 	 * We must grab the target's spin lock before suspending it.
1836 	 * See the comments below and in _thrp_suspend() for why.
1837 	 */
1838 	spin_lock_set(&ulwp->ul_spinlock);
1839 	(void) ___lwp_suspend(tid);
1840 	spin_lock_clear(&ulwp->ul_spinlock);
1841 
1842 top:
1843 	if ((ulwp->ul_critical == 0 && ulwp->ul_rtld == 0) ||
1844 	    ulwp->ul_stopping) {
1845 		/* thread is already safe */
1846 		ulwp->ul_stop |= whystopped;
1847 	} else {
1848 		/*
1849 		 * Setting ul_pleasestop causes the target thread to stop
1850 		 * itself in _thrp_suspend(), below, after we drop its lock.
1851 		 * We must continue the critical thread before dropping
1852 		 * link_lock because the critical thread may be holding
1853 		 * the queue lock for link_lock.  This is delicate.
1854 		 */
1855 		ulwp->ul_pleasestop |= whystopped;
1856 		force_continue(ulwp);
1857 		if (link_dropped != NULL) {
1858 			*link_dropped = 1;
1859 			lmutex_unlock(&udp->link_lock);
1860 			/* be sure to drop link_lock only once */
1861 			link_dropped = NULL;
1862 		}
1863 
1864 		/*
1865 		 * The thread may disappear by calling thr_exit() so we
1866 		 * cannot rely on the ulwp pointer after dropping the lock.
1867 		 * Instead, we search the hash table to find it again.
1868 		 * When we return, we may find that the thread has been
1869 		 * continued by some other thread.  The suspend/continue
1870 		 * interfaces are prone to such race conditions by design.
1871 		 */
1872 		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
1873 		    (ulwp->ul_pleasestop & whystopped)) {
1874 			(void) __cond_wait(cvp, mp);
1875 			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
1876 			    ulwp != NULL; ulwp = ulwp->ul_hash) {
1877 				if (ulwp->ul_lwpid == tid)
1878 					break;
1879 			}
1880 		}
1881 
1882 		if (ulwp == NULL || ulwp->ul_dead)
1883 			error = ESRCH;
1884 		else {
1885 			/*
1886 			 * Do another lwp_suspend() to make sure we don't
1887 			 * return until the target thread is fully stopped
1888 			 * in the kernel.  Don't apply lwp_suspend() until
1889 			 * we know that the target is not holding any
1890 			 * queue locks, that is, that it has completed
1891 			 * ulwp_unlock(self) and has called, or at least
1892 			 * is about to call, lwp_suspend() on itself.  We do
1893 			 * this by grabbing the target's spin lock.
1894 			 */
1895 			ASSERT(ulwp->ul_lwpid == tid);
1896 			spin_lock_set(&ulwp->ul_spinlock);
1897 			(void) ___lwp_suspend(tid);
1898 			spin_lock_clear(&ulwp->ul_spinlock);
1899 			/*
1900 			 * If some other thread did a thr_continue()
1901 			 * on the target thread we have to start over.
1902 			 */
1903 			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
1904 				goto top;
1905 		}
1906 	}
1907 
1908 	(void) cond_broadcast(cvp);
1909 	lmutex_unlock(mp);
1910 	return (error);
1911 }
1912 
1913 int
1914 _thrp_suspend(thread_t tid, uchar_t whystopped)
1915 {
1916 	ulwp_t *self = curthread;
1917 	uberdata_t *udp = self->ul_uberdata;
1918 	ulwp_t *ulwp;
1919 	int error = 0;
1920 
1921 	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
1922 	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);
1923 
1924 	/*
1925 	 * We can't suspend anyone except ourself while
1926 	 * some other thread is performing a fork.
1927 	 * This also allows only one suspension at a time.
1928 	 */
1929 	if (tid != self->ul_lwpid)
1930 		fork_lock_enter();
1931 
1932 	if ((ulwp = find_lwp(tid)) == NULL)
1933 		error = ESRCH;
1934 	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
1935 		ulwp_unlock(ulwp, udp);
1936 		error = EINVAL;
1937 	} else if (ulwp->ul_stop) {	/* already stopped */
1938 		ulwp->ul_stop |= whystopped;
1939 		ulwp_broadcast(ulwp);
1940 		ulwp_unlock(ulwp, udp);
1941 	} else if (ulwp != self) {
1942 		/*
1943 		 * After suspending the other thread, move it out of a
1944 		 * critical section and deal with the schedctl mappings.
1945 		 * safe_suspend() suspends the other thread, calls
1946 		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
1947 		 */
1948 		error = safe_suspend(ulwp, whystopped, NULL);
1949 	} else {
1950 		int schedctl_after_fork = 0;
1951 
1952 		/*
1953 		 * We are suspending ourself.  We must not take a signal
1954 		 * until we return from lwp_suspend() and clear ul_stopping.
1955 		 * This is to guard against siglongjmp().
1956 		 */
1957 		enter_critical(self);
1958 		self->ul_sp = stkptr();
1959 		_flush_windows();	/* sparc */
1960 		self->ul_pleasestop = 0;
1961 		self->ul_stop |= whystopped;
1962 		/*
1963 		 * Grab our spin lock before dropping ulwp_mutex(self).
1964 		 * This prevents the suspending thread from applying
1965 		 * lwp_suspend() to us before we emerge from
1966 		 * lmutex_unlock(mp) and have dropped mp's queue lock.
1967 		 */
1968 		spin_lock_set(&self->ul_spinlock);
1969 		self->ul_stopping = 1;
1970 		ulwp_broadcast(self);
1971 		ulwp_unlock(self, udp);
1972 		/*
1973 		 * From this point until we return from lwp_suspend(),
1974 		 * we must not call any function that might invoke the
1975 		 * dynamic linker, that is, we can only call functions
1976 		 * private to the library.
1977 		 *
1978 		 * Also, this is a nasty race condition for a process
1979 		 * that is undergoing a forkall() operation:
1980 		 * Once we clear our spinlock (below), we are vulnerable
1981 		 * to being suspended by the forkall() thread before
1982 		 * we manage to suspend ourself in ___lwp_suspend().
1983 		 * See safe_suspend() and force_continue().
1984 		 *
1985 		 * To avoid a SIGSEGV due to the disappearance
1986 		 * of the schedctl mappings in the child process,
1987 		 * which can happen in spin_lock_clear() if we
1988 		 * are suspended while we are in the middle of
1989 		 * its call to preempt(), we preemptively clear
1990 		 * our own schedctl pointer before dropping our
1991 		 * spinlock.  We reinstate it, in both the parent
1992 		 * and (if this really is a forkall()) the child.
1993 		 */
1994 		if (whystopped & TSTP_FORK) {
1995 			schedctl_after_fork = 1;
1996 			self->ul_schedctl = NULL;
1997 			self->ul_schedctl_called = &udp->uberflags;
1998 		}
1999 		spin_lock_clear(&self->ul_spinlock);
2000 		(void) ___lwp_suspend(tid);
2001 		/*
2002 		 * Somebody else continued us.
2003 		 * We can't grab ulwp_lock(self)
2004 		 * until after clearing ul_stopping.
2005 		 * force_continue() relies on this.
2006 		 */
2007 		self->ul_stopping = 0;
2008 		self->ul_sp = 0;
2009 		if (schedctl_after_fork) {
2010 			self->ul_schedctl_called = NULL;
2011 			self->ul_schedctl = NULL;
2012 			(void) setup_schedctl();
2013 		}
2014 		ulwp_lock(self, udp);
2015 		ulwp_broadcast(self);
2016 		ulwp_unlock(self, udp);
2017 		exit_critical(self);
2018 	}
2019 
2020 	if (tid != self->ul_lwpid)
2021 		fork_lock_exit();
2022 
2023 	return (error);
2024 }
2025 
2026 /*
2027  * Suspend all lwps other than ourself in preparation for fork.
2028  */
2029 void
2030 suspend_fork()
2031 {
2032 	ulwp_t *self = curthread;
2033 	uberdata_t *udp = self->ul_uberdata;
2034 	ulwp_t *ulwp;
2035 	int link_dropped;
2036 
2037 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
2038 top:
2039 	lmutex_lock(&udp->link_lock);
2040 
2041 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2042 		ulwp_lock(ulwp, udp);
2043 		if (ulwp->ul_stop) {	/* already stopped */
2044 			ulwp->ul_stop |= TSTP_FORK;
2045 			ulwp_broadcast(ulwp);
2046 			ulwp_unlock(ulwp, udp);
2047 		} else {
2048 			/*
2049 			 * Move the stopped lwp out of a critical section.
2050 			 */
2051 			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
2052 			    link_dropped)
2053 				goto top;
2054 		}
2055 	}
2056 
2057 	lmutex_unlock(&udp->link_lock);
2058 }
2059 
2060 void
2061 continue_fork(int child)
2062 {
2063 	ulwp_t *self = curthread;
2064 	uberdata_t *udp = self->ul_uberdata;
2065 	ulwp_t *ulwp;
2066 
2067 	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
2068 
2069 	/*
2070 	 * Clear the schedctl pointers in the child of forkall().
2071 	 */
2072 	if (child) {
2073 		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2074 			ulwp->ul_schedctl_called =
2075 			    ulwp->ul_dead? &udp->uberflags : NULL;
2076 			ulwp->ul_schedctl = NULL;
2077 		}
2078 	}
2079 
2080 	/*
2081 	 * Set all lwps that were stopped for fork() running again.
2082 	 */
2083 	lmutex_lock(&udp->link_lock);
2084 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2085 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2086 		lmutex_lock(mp);
2087 		ASSERT(ulwp->ul_stop & TSTP_FORK);
2088 		ulwp->ul_stop &= ~TSTP_FORK;
2089 		ulwp_broadcast(ulwp);
2090 		if (!ulwp->ul_stop)
2091 			force_continue(ulwp);
2092 		lmutex_unlock(mp);
2093 	}
2094 	lmutex_unlock(&udp->link_lock);
2095 }
2096 
2097 int
2098 _thrp_continue(thread_t tid, uchar_t whystopped)
2099 {
2100 	uberdata_t *udp = curthread->ul_uberdata;
2101 	ulwp_t *ulwp;
2102 	mutex_t *mp;
2103 	int error = 0;
2104 
2105 	ASSERT(whystopped == TSTP_REGULAR ||
2106 	    whystopped == TSTP_MUTATOR);
2107 
2108 	/*
2109 	 * We single-thread the entire thread suspend/continue mechanism.
2110 	 */
2111 	fork_lock_enter();
2112 
2113 	if ((ulwp = find_lwp(tid)) == NULL) {
2114 		fork_lock_exit();
2115 		return (ESRCH);
2116 	}
2117 
2118 	mp = ulwp_mutex(ulwp, udp);
2119 	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
2120 		error = EINVAL;
2121 	} else if (ulwp->ul_stop & whystopped) {
2122 		ulwp->ul_stop &= ~whystopped;
2123 		ulwp_broadcast(ulwp);
2124 		if (!ulwp->ul_stop) {
2125 			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
2126 				ulwp->ul_sp = 0;
2127 				ulwp->ul_created = 0;
2128 			}
2129 			force_continue(ulwp);
2130 		}
2131 	}
2132 	lmutex_unlock(mp);
2133 
2134 	fork_lock_exit();
2135 	return (error);
2136 }
2137 
2138 int
2139 thr_suspend(thread_t tid)
2140 {
2141 	return (_thrp_suspend(tid, TSTP_REGULAR));
2142 }
2143 
2144 int
2145 thr_continue(thread_t tid)
2146 {
2147 	return (_thrp_continue(tid, TSTP_REGULAR));
2148 }
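
/*
 * Illustrative usage sketch (not part of libc): a suspend/continue
 * round trip on a newly created thread.  ESRCH means the target
 * exited before it could be stopped.  The worker() start routine is
 * hypothetical.
 *
 *	#include <thread.h>
 *
 *	extern void *worker(void *);
 *
 *	int
 *	freeze_and_thaw(void)
 *	{
 *		thread_t tid;
 *		int error;
 *
 *		if ((error = thr_create(NULL, 0, worker, NULL, 0, &tid)) != 0)
 *			return (error);
 *		if ((error = thr_suspend(tid)) == 0)
 *			error = thr_continue(tid);
 *		return (error);
 *	}
 */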
2149 
2150 void
2151 thr_yield()
2152 {
2153 	yield();
2154 }
2155 
2156 #pragma weak pthread_kill = thr_kill
2157 #pragma weak _thr_kill = thr_kill
2158 int
2159 thr_kill(thread_t tid, int sig)
2160 {
2161 	if (sig == SIGCANCEL)
2162 		return (EINVAL);
2163 	return (_lwp_kill(tid, sig));
2164 }
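
/*
 * Illustrative usage sketch (not part of libc): directing a signal at
 * a single thread.  SIGCANCEL is reserved for pthread cancellation,
 * so thr_kill() rejects it with EINVAL as shown above.
 *
 *	#include <thread.h>
 *	#include <signal.h>
 *
 *	int
 *	poke(thread_t tid)
 *	{
 *		return (thr_kill(tid, SIGUSR1));
 *	}
 */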
2165 
2166 /*
2167  * Exit a critical section, take deferred actions if necessary.
2168  * Called from exit_critical() and from sigon().
2169  */
2170 void
2171 do_exit_critical()
2172 {
2173 	ulwp_t *self = curthread;
2174 	int sig;
2175 
2176 	ASSERT(self->ul_critical == 0);
2177 
2178 	/*
2179 	 * Don't suspend ourself or take a deferred signal while dying
2180 	 * or while executing inside the dynamic linker (ld.so.1).
2181 	 */
2182 	if (self->ul_dead || self->ul_rtld)
2183 		return;
2184 
2185 	while (self->ul_pleasestop ||
2186 	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
2187 		/*
2188 		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
2189 		 * by keeping self->ul_critical == 1 here.
2190 		 */
2191 		self->ul_critical++;
2192 		while (self->ul_pleasestop) {
2193 			/*
2194 			 * Guard against suspending ourself while on a sleep
2195 			 * queue.  See the comments in call_user_handler().
2196 			 */
2197 			unsleep_self();
2198 			set_parking_flag(self, 0);
2199 			(void) _thrp_suspend(self->ul_lwpid,
2200 			    self->ul_pleasestop);
2201 		}
2202 		self->ul_critical--;
2203 
2204 		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
2205 			/*
2206 			 * Clear ul_cursig before proceeding.
2207 			 * This protects us from the dynamic linker's
2208 			 * calls to bind_guard()/bind_clear() in the
2209 			 * event that it is invoked to resolve a symbol
2210 			 * like take_deferred_signal() below.
2211 			 */
2212 			self->ul_cursig = 0;
2213 			take_deferred_signal(sig);
2214 			ASSERT(self->ul_cursig == 0);
2215 		}
2216 	}
2217 	ASSERT(self->ul_critical == 0);
2218 }
2219 
2220 /*
2221  * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
2222  * (ld.so.1) when it has to do something, like resolve a symbol to be called
2223  * by the application or one of its libraries.  _ti_bind_guard() is called
2224  * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
2225  * application.  The dynamic linker gets special dispensation from libc to
2226  * run in a critical region (all signals deferred and no thread suspension
2227  * or forking allowed), and to be immune from cancellation for the duration.
2228  */
2229 int
2230 _ti_bind_guard(int flags)
2231 {
2232 	ulwp_t *self = curthread;
2233 	uberdata_t *udp = self->ul_uberdata;
2234 	int bindflag = (flags & THR_FLG_RTLD);
2235 
2236 	if ((self->ul_bindflags & bindflag) == bindflag)
2237 		return (0);
2238 	self->ul_bindflags |= bindflag;
2239 	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
2240 		sigoff(self);	/* see no signals while holding ld_lock */
2241 		self->ul_rtld++;	/* don't suspend while in ld.so.1 */
2242 		(void) mutex_lock(&udp->ld_lock);
2243 	}
2244 	enter_critical(self);
2245 	self->ul_save_state = self->ul_cancel_disabled;
2246 	self->ul_cancel_disabled = 1;
2247 	set_cancel_pending_flag(self, 0);
2248 	return (1);
2249 }
2250 
2251 int
2252 _ti_bind_clear(int flags)
2253 {
2254 	ulwp_t *self = curthread;
2255 	uberdata_t *udp = self->ul_uberdata;
2256 	int bindflag = (flags & THR_FLG_RTLD);
2257 
2258 	if ((self->ul_bindflags & bindflag) == 0)
2259 		return (self->ul_bindflags);
2260 	self->ul_bindflags &= ~bindflag;
2261 	self->ul_cancel_disabled = self->ul_save_state;
2262 	set_cancel_pending_flag(self, 0);
2263 	exit_critical(self);
2264 	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
2265 		if (MUTEX_OWNED(&udp->ld_lock, self)) {
2266 			(void) mutex_unlock(&udp->ld_lock);
2267 			self->ul_rtld--;
2268 			sigon(self);	/* reenable signals */
2269 		}
2270 	}
2271 	return (self->ul_bindflags);
2272 }
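
/*
 * Minimal sketch of the caller's side (an assumption about ld.so.1,
 * not code from it): each entry into the dynamic linker is bracketed
 * by the guard/clear pair, and the guard's return value says whether
 * a matching clear is required.  The resolve_symbol() call is
 * hypothetical.
 *
 *	void
 *	rtld_entry_point(void)
 *	{
 *		int entered = _ti_bind_guard(THR_FLG_RTLD);
 *
 *		resolve_symbol();
 *		if (entered)
 *			(void) _ti_bind_clear(THR_FLG_RTLD);
 *	}
 */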
2273 
2274 /*
2275  * Tell the dynamic linker (ld.so.1) whether or not it was entered from
2276  * a critical region in libc.  Return zero if not, else return non-zero.
2277  */
2278 int
2279 _ti_critical(void)
2280 {
2281 	ulwp_t *self = curthread;
2282 	int level = self->ul_critical;
2283 
2284 	if ((self->ul_bindflags & THR_FLG_RTLD) == 0 || level == 0)
2285 		return (level);	/* ld.so.1 hasn't (yet) called enter() */
2286 	return (level - 1);
2287 }
2288 
2289 /*
2290  * sigoff() and sigon() enable cond_wait() to behave (optionally) like
2291  * it does in the old libthread (see the comments in cond_wait_queue()).
2292  * Also, signals are deferred at thread startup until TLS constructors
2293  * have all been called, at which time _thrp_setup() calls sigon().
2294  *
2295  * _sigoff() and _sigon() are external consolidation-private interfaces to
2296  * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
2297  * Also, _sigoff() and _sigon() are called from dbx's run-time checking
2298  * (librtc.so) to defer signals during its critical sections (not to be
2299  * confused with libc critical sections [see exit_critical() above]).
2300  */
2301 void
2302 _sigoff(void)
2303 {
2304 	ulwp_t *self = curthread;
2305 
2306 	sigoff(self);
2307 }
2308 
2309 void
2310 _sigon(void)
2311 {
2312 	ulwp_t *self = curthread;
2313 
2314 	ASSERT(self->ul_sigdefer > 0);
2315 	sigon(self);
2316 }
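
/*
 * Illustrative sketch (not part of libc): an external consolidation
 * consumer such as libnsl brackets a signal-sensitive region with the
 * exported pair.  The critical_work() call is hypothetical.
 *
 *	extern void _sigoff(void);
 *	extern void _sigon(void);
 *
 *	void
 *	no_signals_here(void)
 *	{
 *		_sigoff();
 *		critical_work();
 *		_sigon();
 *	}
 */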
2317 
2318 int
2319 thr_getconcurrency()
2320 {
2321 	return (thr_concurrency);
2322 }
2323 
2324 int
2325 pthread_getconcurrency()
2326 {
2327 	return (pthread_concurrency);
2328 }
2329 
2330 int
2331 thr_setconcurrency(int new_level)
2332 {
2333 	uberdata_t *udp = curthread->ul_uberdata;
2334 
2335 	if (new_level < 0)
2336 		return (EINVAL);
2337 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2338 		return (EAGAIN);
2339 	lmutex_lock(&udp->link_lock);
2340 	if (new_level > thr_concurrency)
2341 		thr_concurrency = new_level;
2342 	lmutex_unlock(&udp->link_lock);
2343 	return (0);
2344 }
2345 
2346 int
2347 pthread_setconcurrency(int new_level)
2348 {
2349 	if (new_level < 0)
2350 		return (EINVAL);
2351 	if (new_level > 65536)		/* 65536 is totally arbitrary */
2352 		return (EAGAIN);
2353 	pthread_concurrency = new_level;
2354 	return (0);
2355 }
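
/*
 * Illustrative usage sketch (not part of libc): since the library only
 * stores these values, setting and then getting the concurrency level
 * round-trips it; thr_setconcurrency() never lowers thr_concurrency,
 * as the code above shows.
 *
 *	#include <thread.h>
 *
 *	int
 *	hint_concurrency(int nhint)
 *	{
 *		int error;
 *
 *		if ((error = thr_setconcurrency(nhint)) != 0)
 *			return (error);
 *		return (thr_getconcurrency() >= nhint ? 0 : -1);
 *	}
 */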
2356 
2357 size_t
2358 thr_min_stack(void)
2359 {
2360 	return (MINSTACK);
2361 }
2362 
2363 int
2364 __nthreads(void)
2365 {
2366 	return (curthread->ul_uberdata->nthreads);
2367 }
2368 
2369 /*
2370  * XXX
2371  * The remainder of this file implements the private interfaces to java for
2372  * garbage collection.  It is no longer used, at least by java 1.2.
2373  * It can all go away once all old JVMs have disappeared.
2374  */
2375 
2376 int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
2377 int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
2378 int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
2379 mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
2380 cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */
2381 
2382 /*
2383  * Get the available register state for the target thread.
2384  * Return non-volatile registers: TRS_NONVOLATILE
2385  */
2386 #pragma weak _thr_getstate = thr_getstate
2387 int
2388 thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
2389 {
2390 	ulwp_t *self = curthread;
2391 	uberdata_t *udp = self->ul_uberdata;
2392 	ulwp_t **ulwpp;
2393 	ulwp_t *ulwp;
2394 	int error = 0;
2395 	int trs_flag = TRS_LWPID;
2396 
2397 	if (tid == 0 || self->ul_lwpid == tid) {
2398 		ulwp = self;
2399 		ulwp_lock(ulwp, udp);
2400 	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
2401 		ulwp = *ulwpp;
2402 	} else {
2403 		if (flag)
2404 			*flag = TRS_INVALID;
2405 		return (ESRCH);
2406 	}
2407 
2408 	if (ulwp->ul_dead) {
2409 		trs_flag = TRS_INVALID;
2410 	} else if (!ulwp->ul_stop && !suspendedallmutators) {
2411 		error = EINVAL;
2412 		trs_flag = TRS_INVALID;
2413 	} else if (ulwp->ul_stop) {
2414 		trs_flag = TRS_NONVOLATILE;
2415 		getgregs(ulwp, rs);
2416 	}
2417 
2418 	if (flag)
2419 		*flag = trs_flag;
2420 	if (lwp)
2421 		*lwp = tid;
2422 	if (ss != NULL)
2423 		(void) _thrp_stksegment(ulwp, ss);
2424 
2425 	ulwp_unlock(ulwp, udp);
2426 	return (error);
2427 }
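
/*
 * Illustrative usage sketch (not part of libc): a collector reads a
 * stopped thread's non-volatile registers.  The TRS_* values and the
 * thr_getstate() declaration are assumed to come from <thread.h>, and
 * scan_roots() is hypothetical.
 *
 *	#include <thread.h>
 *	#include <sys/regset.h>
 *
 *	extern void scan_roots(gregset_t, stack_t *);
 *
 *	int
 *	scan_thread(thread_t tid)
 *	{
 *		int flag;
 *		lwpid_t lwp;
 *		stack_t ss;
 *		gregset_t regs;
 *
 *		if (thr_suspend(tid) != 0)
 *			return (-1);
 *		if (thr_getstate(tid, &flag, &lwp, &ss, regs) == 0 &&
 *		    flag == TRS_NONVOLATILE)
 *			scan_roots(regs, &ss);
 *		return (thr_continue(tid));
 *	}
 */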
2428 
2429 /*
2430  * Set the appropriate register state for the target thread.
2431  * This is not used by java.  It exists solely for the MSTC test suite.
2432  */
2433 #pragma weak _thr_setstate = thr_setstate
2434 int
2435 thr_setstate(thread_t tid, int flag, gregset_t rs)
2436 {
2437 	uberdata_t *udp = curthread->ul_uberdata;
2438 	ulwp_t *ulwp;
2439 	int error = 0;
2440 
2441 	if ((ulwp = find_lwp(tid)) == NULL)
2442 		return (ESRCH);
2443 
2444 	if (!ulwp->ul_stop && !suspendedallmutators)
2445 		error = EINVAL;
2446 	else if (rs != NULL) {
2447 		switch (flag) {
2448 		case TRS_NONVOLATILE:
2449 			/* do /proc stuff here? */
2450 			if (ulwp->ul_stop)
2451 				setgregs(ulwp, rs);
2452 			else
2453 				error = EINVAL;
2454 			break;
2455 		case TRS_LWPID:		/* do /proc stuff here? */
2456 		default:
2457 			error = EINVAL;
2458 			break;
2459 		}
2460 	}
2461 
2462 	ulwp_unlock(ulwp, udp);
2463 	return (error);
2464 }
2465 
2466 int
2467 getlwpstatus(thread_t tid, struct lwpstatus *sp)
2468 {
2469 	extern ssize_t __pread(int, void *, size_t, off_t);
2470 	char buf[100];
2471 	int fd;
2472 
2473 	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
2474 	(void) strcpy(buf, "/proc/self/lwp/");
2475 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2476 	(void) strcat(buf, "/lwpstatus");
2477 	if ((fd = __open(buf, O_RDONLY, 0)) >= 0) {
2478 		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
2479 			if (sp->pr_flags & PR_STOPPED) {
2480 				(void) __close(fd);
2481 				return (0);
2482 			}
2483 			yield();	/* give him a chance to stop */
2484 		}
2485 		(void) __close(fd);
2486 	}
2487 	return (-1);
2488 }
2489 
2490 int
2491 putlwpregs(thread_t tid, prgregset_t prp)
2492 {
2493 	extern ssize_t __writev(int, const struct iovec *, int);
2494 	char buf[100];
2495 	int fd;
2496 	long dstop_sreg[2];
2497 	long run_null[2];
2498 	iovec_t iov[3];
2499 
2500 	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
2501 	(void) strcpy(buf, "/proc/self/lwp/");
2502 	ultos((uint64_t)tid, 10, buf + strlen(buf));
2503 	(void) strcat(buf, "/lwpctl");
2504 	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
2505 		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
2506 		dstop_sreg[1] = PCSREG;		/* set the registers */
2507 		iov[0].iov_base = (caddr_t)dstop_sreg;
2508 		iov[0].iov_len = sizeof (dstop_sreg);
2509 		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
2510 		iov[1].iov_len = sizeof (prgregset_t);
2511 		run_null[0] = PCRUN;		/* make it runnable again */
2512 		run_null[1] = 0;
2513 		iov[2].iov_base = (caddr_t)run_null;
2514 		iov[2].iov_len = sizeof (run_null);
2515 		if (__writev(fd, iov, 3) >= 0) {
2516 			(void) __close(fd);
2517 			return (0);
2518 		}
2519 		(void) __close(fd);
2520 	}
2521 	return (-1);
2522 }
2523 
2524 static ulong_t
2525 gettsp_slow(thread_t tid)
2526 {
2527 	char buf[100];
2528 	struct lwpstatus status;
2529 
2530 	if (getlwpstatus(tid, &status) != 0) {
2531 		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
2532 		(void) strcpy(buf, "__gettsp(");
2533 		ultos((uint64_t)tid, 10, buf + strlen(buf));
2534 		(void) strcat(buf, "): can't read lwpstatus");
2535 		thr_panic(buf);
2536 	}
2537 	return (status.pr_reg[R_SP]);
2538 }
2539 
2540 ulong_t
2541 __gettsp(thread_t tid)
2542 {
2543 	uberdata_t *udp = curthread->ul_uberdata;
2544 	ulwp_t *ulwp;
2545 	ulong_t result;
2546 
2547 	if ((ulwp = find_lwp(tid)) == NULL)
2548 		return (0);
2549 
2550 	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
2551 		ulwp_unlock(ulwp, udp);
2552 		return (result);
2553 	}
2554 
2555 	result = gettsp_slow(tid);
2556 	ulwp_unlock(ulwp, udp);
2557 	return (result);
2558 }
2559 
2560 /*
2561  * This tells java stack walkers how to find the ucontext
2562  * structure passed to signal handlers.
2563  */
2564 #pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
2565 void
2566 thr_sighndlrinfo(void (**func)(), int *funcsize)
2567 {
2568 	*func = &__sighndlr;
2569 	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
2570 }
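
/*
 * Illustrative sketch (not part of libc): a stack walker tests whether
 * a saved pc lies within __sighndlr; if so, the frame was built by the
 * signal dispatcher and carries a ucontext to follow.  The declaration
 * of thr_sighndlrinfo() in <thread.h> is an assumption.
 *
 *	#include <sys/types.h>
 *	#include <thread.h>
 *
 *	int
 *	frame_is_sighandler(uintptr_t pc)
 *	{
 *		void (*func)();
 *		int funcsize;
 *
 *		thr_sighndlrinfo(&func, &funcsize);
 *		return (pc >= (uintptr_t)func &&
 *		    pc < (uintptr_t)func + funcsize);
 *	}
 */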
2571 
2572 /*
2573  * Mark a thread a mutator or reset a mutator to being a default,
2574  * non-mutator thread.
2575  */
2576 #pragma weak _thr_setmutator = thr_setmutator
2577 int
2578 thr_setmutator(thread_t tid, int enabled)
2579 {
2580 	ulwp_t *self = curthread;
2581 	uberdata_t *udp = self->ul_uberdata;
2582 	ulwp_t *ulwp;
2583 	int error;
2584 	int cancel_state;
2585 
2586 	enabled = enabled? 1 : 0;
2587 top:
2588 	if (tid == 0) {
2589 		ulwp = self;
2590 		ulwp_lock(ulwp, udp);
2591 	} else if ((ulwp = find_lwp(tid)) == NULL) {
2592 		return (ESRCH);
2593 	}
2594 
2595 	/*
2596 	 * The target thread should be the caller itself or a suspended thread.
2597 	 * This prevents the target from also changing its ul_mutator field.
2598 	 */
2599 	error = 0;
2600 	if (ulwp != self && !ulwp->ul_stop && enabled)
2601 		error = EINVAL;
2602 	else if (ulwp->ul_mutator != enabled) {
2603 		lmutex_lock(&mutatorslock);
2604 		if (mutatorsbarrier) {
2605 			ulwp_unlock(ulwp, udp);
2606 			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
2607 			    &cancel_state);
2608 			while (mutatorsbarrier)
2609 				(void) cond_wait(&mutatorscv, &mutatorslock);
2610 			(void) pthread_setcancelstate(cancel_state, NULL);
2611 			lmutex_unlock(&mutatorslock);
2612 			goto top;
2613 		}
2614 		ulwp->ul_mutator = enabled;
2615 		lmutex_unlock(&mutatorslock);
2616 	}
2617 
2618 	ulwp_unlock(ulwp, udp);
2619 	return (error);
2620 }
2621 
2622 /*
2623  * Establish a barrier against new mutators.  Any non-mutator trying
2624  * to become a mutator is suspended until the barrier is removed.
2625  */
2626 #pragma weak _thr_mutators_barrier = thr_mutators_barrier
2627 void
2628 thr_mutators_barrier(int enabled)
2629 {
2630 	int oldvalue;
2631 	int cancel_state;
2632 
2633 	lmutex_lock(&mutatorslock);
2634 
2635 	/*
2636 	 * Wait if trying to set the barrier while it is already set.
2637 	 */
2638 	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
2639 	while (mutatorsbarrier && enabled)
2640 		(void) cond_wait(&mutatorscv, &mutatorslock);
2641 	(void) pthread_setcancelstate(cancel_state, NULL);
2642 
2643 	oldvalue = mutatorsbarrier;
2644 	mutatorsbarrier = enabled;
2645 	/*
2646 	 * Wake up any blocked non-mutators when the barrier is removed.
2647 	 */
2648 	if (oldvalue && !enabled)
2649 		(void) cond_broadcast(&mutatorscv);
2650 	lmutex_unlock(&mutatorslock);
2651 }
2652 
2653 /*
2654  * Suspend the set of all mutators except for the caller.  The list
2655  * of actively running threads is searched and only the mutators
2656  * in this list are suspended.  Actively running non-mutators remain
2657  * running.  Any other thread is suspended.
2658  */
2659 #pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
2660 int
2661 thr_suspend_allmutators(void)
2662 {
2663 	ulwp_t *self = curthread;
2664 	uberdata_t *udp = self->ul_uberdata;
2665 	ulwp_t *ulwp;
2666 	int link_dropped;
2667 
2668 	/*
2669 	 * We single-thread the entire thread suspend/continue mechanism.
2670 	 */
2671 	fork_lock_enter();
2672 
2673 top:
2674 	lmutex_lock(&udp->link_lock);
2675 
2676 	if (suspendingallmutators || suspendedallmutators) {
2677 		lmutex_unlock(&udp->link_lock);
2678 		fork_lock_exit();
2679 		return (EINVAL);
2680 	}
2681 	suspendingallmutators = 1;
2682 
2683 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2684 		ulwp_lock(ulwp, udp);
2685 		if (!ulwp->ul_mutator) {
2686 			ulwp_unlock(ulwp, udp);
2687 		} else if (ulwp->ul_stop) {	/* already stopped */
2688 			ulwp->ul_stop |= TSTP_MUTATOR;
2689 			ulwp_broadcast(ulwp);
2690 			ulwp_unlock(ulwp, udp);
2691 		} else {
2692 			/*
2693 			 * Move the stopped lwp out of a critical section.
2694 			 */
2695 			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
2696 			    link_dropped) {
2697 				suspendingallmutators = 0;
2698 				goto top;
2699 			}
2700 		}
2701 	}
2702 
2703 	suspendedallmutators = 1;
2704 	suspendingallmutators = 0;
2705 	lmutex_unlock(&udp->link_lock);
2706 	fork_lock_exit();
2707 	return (0);
2708 }
2709 
2710 /*
2711  * Suspend the target mutator.  The caller is permitted to suspend
2712  * itself.  If a mutator barrier is enabled, the caller will suspend
2713  * itself as though it had been suspended by thr_suspend_allmutators().
2714  * When the barrier is removed, this thread will be resumed.  Any
2715  * suspended mutator, whether suspended by thr_suspend_mutator(), or by
2716  * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
2717  */
2718 #pragma weak _thr_suspend_mutator = thr_suspend_mutator
2719 int
2720 thr_suspend_mutator(thread_t tid)
2721 {
2722 	if (tid == 0)
2723 		tid = curthread->ul_lwpid;
2724 	return (_thrp_suspend(tid, TSTP_MUTATOR));
2725 }
2726 
2727 /*
2728  * Resume the set of all suspended mutators.
2729  */
2730 #pragma weak _thr_continue_allmutators = thr_continue_allmutators
2731 int
2732 thr_continue_allmutators()
2733 {
2734 	ulwp_t *self = curthread;
2735 	uberdata_t *udp = self->ul_uberdata;
2736 	ulwp_t *ulwp;
2737 
2738 	/*
2739 	 * We single-thread the entire thread suspend/continue mechanism.
2740 	 */
2741 	fork_lock_enter();
2742 
2743 	lmutex_lock(&udp->link_lock);
2744 	if (!suspendedallmutators) {
2745 		lmutex_unlock(&udp->link_lock);
2746 		fork_lock_exit();
2747 		return (EINVAL);
2748 	}
2749 	suspendedallmutators = 0;
2750 
2751 	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
2752 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2753 		lmutex_lock(mp);
2754 		if (ulwp->ul_stop & TSTP_MUTATOR) {
2755 			ulwp->ul_stop &= ~TSTP_MUTATOR;
2756 			ulwp_broadcast(ulwp);
2757 			if (!ulwp->ul_stop)
2758 				force_continue(ulwp);
2759 		}
2760 		lmutex_unlock(mp);
2761 	}
2762 
2763 	lmutex_unlock(&udp->link_lock);
2764 	fork_lock_exit();
2765 	return (0);
2766 }
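
/*
 * Illustrative stop-the-world sketch (not part of libc), combining the
 * interfaces above: raise the barrier so that no new mutators appear,
 * stop every running mutator, do the work, then undo both steps.  The
 * collect() call is hypothetical.
 *
 *	extern void collect(void);
 *
 *	int
 *	stop_the_world(void)
 *	{
 *		int error;
 *
 *		thr_mutators_barrier(1);
 *		if ((error = thr_suspend_allmutators()) == 0) {
 *			collect();
 *			error = thr_continue_allmutators();
 *		}
 *		thr_mutators_barrier(0);
 *		return (error);
 *	}
 */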
2767 
2768 /*
2769  * Resume a suspended mutator.
2770  */
2771 #pragma weak _thr_continue_mutator = thr_continue_mutator
2772 int
2773 thr_continue_mutator(thread_t tid)
2774 {
2775 	return (_thrp_continue(tid, TSTP_MUTATOR));
2776 }
2777 
2778 #pragma weak _thr_wait_mutator = thr_wait_mutator
2779 int
2780 thr_wait_mutator(thread_t tid, int dontwait)
2781 {
2782 	uberdata_t *udp = curthread->ul_uberdata;
2783 	ulwp_t *ulwp;
2784 	int cancel_state;
2785 	int error = 0;
2786 
2787 	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
2788 top:
2789 	if ((ulwp = find_lwp(tid)) == NULL) {
2790 		(void) pthread_setcancelstate(cancel_state, NULL);
2791 		return (ESRCH);
2792 	}
2793 
2794 	if (!ulwp->ul_mutator)
2795 		error = EINVAL;
2796 	else if (dontwait) {
2797 		if (!(ulwp->ul_stop & TSTP_MUTATOR))
2798 			error = EWOULDBLOCK;
2799 	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
2800 		cond_t *cvp = ulwp_condvar(ulwp, udp);
2801 		mutex_t *mp = ulwp_mutex(ulwp, udp);
2802 
2803 		(void) cond_wait(cvp, mp);
2804 		(void) lmutex_unlock(mp);
2805 		goto top;
2806 	}
2807 
2808 	ulwp_unlock(ulwp, udp);
2809 	(void) pthread_setcancelstate(cancel_state, NULL);
2810 	return (error);
2811 }
2812 
2813 /* PROBE_SUPPORT begin */
2814 
2815 void
2816 thr_probe_setup(void *data)
2817 {
2818 	curthread->ul_tpdp = data;
2819 }
2820 
2821 static void *
2822 _thread_probe_getfunc()
2823 {
2824 	return (curthread->ul_tpdp);
2825 }
2826 
2827 void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;
2828 
2829 /* ARGSUSED */
2830 void
2831 _resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
2832 {
2833 	/* never called */
2834 }
2835 
2836 /* ARGSUSED */
2837 void
2838 _resume_ret(ulwp_t *oldlwp)
2839 {
2840 	/* never called */
2841 }
2842 
2843 /* PROBE_SUPPORT end */
2844