17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57257d1b4Sraf  * Common Development and Distribution License (the "License").
67257d1b4Sraf  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217257d1b4Sraf 
227c478bd9Sstevel@tonic-gate /*
23*49b225e1SGavin Maltby  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
267c478bd9Sstevel@tonic-gate 
277257d1b4Sraf #include "lint.h"
287c478bd9Sstevel@tonic-gate #include "thr_uberdata.h"
297c478bd9Sstevel@tonic-gate #include "libc.h"
307c478bd9Sstevel@tonic-gate 
317c478bd9Sstevel@tonic-gate #include <alloca.h>
327c478bd9Sstevel@tonic-gate #include <unistd.h>
337c478bd9Sstevel@tonic-gate #include <thread.h>
347c478bd9Sstevel@tonic-gate #include <pthread.h>
357c478bd9Sstevel@tonic-gate #include <stdio.h>
367c478bd9Sstevel@tonic-gate #include <errno.h>
377c478bd9Sstevel@tonic-gate #include <door.h>
387c478bd9Sstevel@tonic-gate #include <signal.h>
397c478bd9Sstevel@tonic-gate #include <ucred.h>
40*49b225e1SGavin Maltby #include <strings.h>
41*49b225e1SGavin Maltby #include <ucontext.h>
427c478bd9Sstevel@tonic-gate #include <sys/ucred.h>
43*49b225e1SGavin Maltby #include <atomic.h>
447c478bd9Sstevel@tonic-gate 
457c478bd9Sstevel@tonic-gate static door_server_func_t door_create_server;
467c478bd9Sstevel@tonic-gate 
477c478bd9Sstevel@tonic-gate /*
487c478bd9Sstevel@tonic-gate  * Global state -- the non-statics are accessed from the __door_return()
497c478bd9Sstevel@tonic-gate  * syscall wrapper.
507c478bd9Sstevel@tonic-gate  */
517c478bd9Sstevel@tonic-gate static mutex_t		door_state_lock = DEFAULTMUTEX;
527c478bd9Sstevel@tonic-gate door_server_func_t	*door_server_func = door_create_server;
537c478bd9Sstevel@tonic-gate pid_t			door_create_pid = 0;
547c478bd9Sstevel@tonic-gate static pid_t		door_create_first_pid = 0;
557c478bd9Sstevel@tonic-gate static pid_t		door_create_unref_pid = 0;
567c478bd9Sstevel@tonic-gate 
577c478bd9Sstevel@tonic-gate /*
587c478bd9Sstevel@tonic-gate  * The raw system call interfaces
597c478bd9Sstevel@tonic-gate  */
607c478bd9Sstevel@tonic-gate extern int __door_create(void (*)(void *, char *, size_t, door_desc_t *,
617c478bd9Sstevel@tonic-gate     uint_t), void *, uint_t);
627c478bd9Sstevel@tonic-gate extern int __door_return(caddr_t, size_t, door_return_desc_t *, caddr_t,
637c478bd9Sstevel@tonic-gate     size_t);
647c478bd9Sstevel@tonic-gate extern int __door_ucred(ucred_t *);
657c478bd9Sstevel@tonic-gate extern int __door_unref(void);
66*49b225e1SGavin Maltby extern int __door_unbind(void);
67*49b225e1SGavin Maltby 
68*49b225e1SGavin Maltby /*
69*49b225e1SGavin Maltby  * Key for per-door data for doors created with door_xcreate.
70*49b225e1SGavin Maltby  */
71*49b225e1SGavin Maltby static pthread_key_t privdoor_key = PTHREAD_ONCE_KEY_NP;
72*49b225e1SGavin Maltby 
73*49b225e1SGavin Maltby /*
74*49b225e1SGavin Maltby  * Each door_xcreate'd door has a struct privdoor_data allocated for it,
75*49b225e1SGavin Maltby  * and each of the initial pool of service threads for the door
76*49b225e1SGavin Maltby  * has TSD for the privdoor_key set to point to this structure.
77*49b225e1SGavin Maltby  * When a thread in door_return decides it is time to perform a
78*49b225e1SGavin Maltby  * thread depletion callback we can retrieve this door information
79*49b225e1SGavin Maltby  * via a TSD lookup on the privdoor key.
80*49b225e1SGavin Maltby  */
struct privdoor_data {
	int pd_dfd;				/* door file descriptor */
	door_id_t pd_uniqid;			/* door identity from door_info */
	volatile uint32_t pd_refcnt;		/* holds; freed when it hits 0 */
	door_xcreate_server_func_t *pd_crf;	/* caller's thread-create func */
	void *pd_crcookie;			/* cookie passed to pd_crf */
	door_xcreate_thrsetup_func_t *pd_setupf; /* per-thread setup func */
};
89*49b225e1SGavin Maltby 
90*49b225e1SGavin Maltby static int door_xcreate_n(door_info_t *, struct privdoor_data *, int);
91*49b225e1SGavin Maltby 
92*49b225e1SGavin Maltby /*
93*49b225e1SGavin Maltby  * door_create_cmn holds the privdoor data before kicking off server
94*49b225e1SGavin Maltby  * thread creation, all of which must succeed; if they don't then
95*49b225e1SGavin Maltby  * they return leaving the refcnt unchanged overall, and door_create_cmn
96*49b225e1SGavin Maltby  * releases its hold after revoking the door and we're done.  Otherwise
97*49b225e1SGavin Maltby  * all n threads created add one each to the refcnt, and door_create_cmn
98*49b225e1SGavin Maltby  * drops its hold.  If and when a server thread exits the key destructor
99*49b225e1SGavin Maltby  * function will be called, and we use that to decrement the reference
100*49b225e1SGavin Maltby  * count.  We also decrement the reference count on door_unbind().
101*49b225e1SGavin Maltby  * If ever we get the reference count to 0 then we will free that data.
102*49b225e1SGavin Maltby  */
/*
 * Take an additional hold on the per-door data shared by all service
 * threads of a door_xcreate'd door.  Balanced by privdoor_data_rele().
 */
static void
privdoor_data_hold(struct privdoor_data *pdd)
{
	atomic_inc_32(&pdd->pd_refcnt);
}
108*49b225e1SGavin Maltby 
109*49b225e1SGavin Maltby static void
110*49b225e1SGavin Maltby privdoor_data_rele(struct privdoor_data *pdd)
111*49b225e1SGavin Maltby {
112*49b225e1SGavin Maltby 	if (atomic_dec_32_nv(&pdd->pd_refcnt) == 0)
113*49b225e1SGavin Maltby 		free(pdd);
114*49b225e1SGavin Maltby }
115*49b225e1SGavin Maltby 
/*
 * TSD destructor for privdoor_key: invoked when a private door service
 * thread exits, releasing that thread's hold on the shared door data.
 */
void
privdoor_destructor(void *data)
{
	struct privdoor_data *pdd = data;

	privdoor_data_rele(pdd);
}
1217c478bd9Sstevel@tonic-gate 
1227c478bd9Sstevel@tonic-gate /*
1237c478bd9Sstevel@tonic-gate  * We park the ourselves in the kernel to serve as the "caller" for
1247c478bd9Sstevel@tonic-gate  * unreferenced upcalls for this process.  If the call returns with
1257c478bd9Sstevel@tonic-gate  * EINTR (e.g., someone did a forkall), we repeat as long as we're still
1267c478bd9Sstevel@tonic-gate  * in the parent.  If the child creates an unref door it will create
1277c478bd9Sstevel@tonic-gate  * a new thread.
1287c478bd9Sstevel@tonic-gate  */
1297c478bd9Sstevel@tonic-gate static void *
1307c478bd9Sstevel@tonic-gate door_unref_func(void *arg)
1317c478bd9Sstevel@tonic-gate {
1327c478bd9Sstevel@tonic-gate 	pid_t mypid = (pid_t)(uintptr_t)arg;
1337c478bd9Sstevel@tonic-gate 
1347c478bd9Sstevel@tonic-gate 	sigset_t fillset;
1357c478bd9Sstevel@tonic-gate 
1367c478bd9Sstevel@tonic-gate 	/* mask signals before diving into the kernel */
1377c478bd9Sstevel@tonic-gate 	(void) sigfillset(&fillset);
1387c478bd9Sstevel@tonic-gate 	(void) thr_sigsetmask(SIG_SETMASK, &fillset, NULL);
1397c478bd9Sstevel@tonic-gate 
1407c478bd9Sstevel@tonic-gate 	while (getpid() == mypid && __door_unref() && errno == EINTR)
1417c478bd9Sstevel@tonic-gate 		continue;
1427c478bd9Sstevel@tonic-gate 
1437c478bd9Sstevel@tonic-gate 	return (NULL);
1447c478bd9Sstevel@tonic-gate }
1457c478bd9Sstevel@tonic-gate 
/*
 * Common implementation behind door_create() and door_xcreate().
 * Creates the door, arranges per-process bookkeeping (unref service
 * thread, first-door server pool), and for private doors starts the
 * initial service thread(s).  Returns the door fd, or -1 with errno set.
 */
static int
door_create_cmn(door_server_procedure_t *f, void *cookie, uint_t flags,
    door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
    void *crcookie, int nthread)
{
	int d;

	int is_private = (flags & DOOR_PRIVATE);
	int is_unref = (flags & (DOOR_UNREF | DOOR_UNREF_MULTI));
	int do_create_first = 0;
	int do_create_unref = 0;

	ulwp_t *self = curthread;

	pid_t mypid;

	/* A door created between vfork() and exec() would be unusable. */
	if (self->ul_vfork) {
		errno = ENOTSUP;
		return (-1);
	}

	/*
	 * A creation function implies door_xcreate(): the application,
	 * not libc, creates and binds the service threads.
	 */
	if (crf)
		flags |= DOOR_PRIVCREATE;

	/*
	 * Doors are associated with the processes which created them.  In
	 * the face of forkall(), this gets quite complicated.  To simplify
	 * it somewhat, we include the call to __door_create() in a critical
	 * section, and figure out what additional actions to take while
	 * still in the critical section.
	 */
	enter_critical(self);
	if ((d = __door_create(f, cookie, flags)) < 0) {
		exit_critical(self);
		return (-1);	/* errno is set */
	}
	mypid = getpid();
	/*
	 * Decide, once per process, whether this is the first door and/or
	 * the first unref door created in this process since a fork.
	 */
	if (mypid != door_create_pid ||
	    (!is_private && mypid != door_create_first_pid) ||
	    (is_unref && mypid != door_create_unref_pid)) {

		lmutex_lock(&door_state_lock);
		door_create_pid = mypid;

		if (!is_private && mypid != door_create_first_pid) {
			do_create_first = 1;
			door_create_first_pid = mypid;
		}
		if (is_unref && mypid != door_create_unref_pid) {
			do_create_unref = 1;
			door_create_unref_pid = mypid;
		}
		lmutex_unlock(&door_state_lock);
	}
	exit_critical(self);

	if (do_create_unref) {
		/*
		 * Create an unref thread the first time we create an
		 * unref door for this process.  Create it as a daemon
		 * thread, so that it doesn't interfere with normal exit
		 * processing.
		 */
		(void) thr_create(NULL, 0, door_unref_func,
		    (void *)(uintptr_t)mypid, THR_DAEMON, NULL);
	}

	if (is_private) {
		door_info_t di;

		/*
		 * Create the first thread(s) for this private door.
		 */
		if (__door_info(d, &di) < 0)
			return (-1);	/* errno is set */

		/*
		 * This key must be available for lookup for all private
		 * door threads, whether associated with a door created via
		 * door_create or door_xcreate.
		 */
		(void) pthread_key_create_once_np(&privdoor_key,
		    privdoor_destructor);

		if (crf == NULL) {
			(*door_server_func)(&di);
		} else {
			struct privdoor_data *pdd = malloc(sizeof (*pdd));

			if (pdd == NULL) {
				(void) door_revoke(d);
				errno = ENOMEM;
				return (-1);
			}

			pdd->pd_dfd = d;
			pdd->pd_uniqid = di.di_uniquifier;
			pdd->pd_refcnt = 1; /* prevent free during xcreate_n */
			pdd->pd_crf = crf;
			pdd->pd_crcookie = crcookie;
			pdd->pd_setupf = setupf;

			/*
			 * All nthread service threads must start and bind;
			 * on any failure, revoke the door (preserving the
			 * errno from the failure) so the caller sees a
			 * clean error and no half-built door.
			 */
			if (!door_xcreate_n(&di, pdd, nthread)) {
				int errnocp = errno;

				(void) door_revoke(d);
				privdoor_data_rele(pdd);
				errno = errnocp;
				return (-1);
			} else {
				privdoor_data_rele(pdd);
			}
		}
	} else if (do_create_first) {
		/* First non-private door created in the process */
		(*door_server_func)(NULL);
	}

	return (d);
}
2667c478bd9Sstevel@tonic-gate 
2677c478bd9Sstevel@tonic-gate int
268*49b225e1SGavin Maltby door_create(door_server_procedure_t *f, void *cookie, uint_t flags)
269*49b225e1SGavin Maltby {
270*49b225e1SGavin Maltby 	if (flags & (DOOR_NO_DEPLETION_CB | DOOR_PRIVCREATE)) {
271*49b225e1SGavin Maltby 		errno = EINVAL;
272*49b225e1SGavin Maltby 		return (-1);
273*49b225e1SGavin Maltby 	}
274*49b225e1SGavin Maltby 
275*49b225e1SGavin Maltby 	return (door_create_cmn(f, cookie, flags, NULL, NULL, NULL, 1));
276*49b225e1SGavin Maltby }
277*49b225e1SGavin Maltby 
278*49b225e1SGavin Maltby int
279*49b225e1SGavin Maltby door_xcreate(door_server_procedure_t *f, void *cookie, uint_t flags,
280*49b225e1SGavin Maltby     door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
281*49b225e1SGavin Maltby     void *crcookie, int nthread)
282*49b225e1SGavin Maltby {
283*49b225e1SGavin Maltby 	if (flags & DOOR_PRIVCREATE || nthread < 1 || crf == NULL) {
284*49b225e1SGavin Maltby 		errno = EINVAL;
285*49b225e1SGavin Maltby 		return (-1);
286*49b225e1SGavin Maltby 	}
287*49b225e1SGavin Maltby 
288*49b225e1SGavin Maltby 	return (door_create_cmn(f, cookie, flags | DOOR_PRIVATE,
289*49b225e1SGavin Maltby 	    crf, setupf, crcookie, nthread));
290*49b225e1SGavin Maltby }
291*49b225e1SGavin Maltby 
292*49b225e1SGavin Maltby int
2937c478bd9Sstevel@tonic-gate door_ucred(ucred_t **uc)
2947c478bd9Sstevel@tonic-gate {
2957c478bd9Sstevel@tonic-gate 	ucred_t *ucp = *uc;
2967c478bd9Sstevel@tonic-gate 
2977c478bd9Sstevel@tonic-gate 	if (ucp == NULL) {
2987c478bd9Sstevel@tonic-gate 		ucp = _ucred_alloc();
2997c478bd9Sstevel@tonic-gate 		if (ucp == NULL)
3007c478bd9Sstevel@tonic-gate 			return (-1);
3017c478bd9Sstevel@tonic-gate 	}
3027c478bd9Sstevel@tonic-gate 
3037c478bd9Sstevel@tonic-gate 	if (__door_ucred(ucp) != 0) {
3047c478bd9Sstevel@tonic-gate 		if (*uc == NULL)
3057c478bd9Sstevel@tonic-gate 			ucred_free(ucp);
3067c478bd9Sstevel@tonic-gate 		return (-1);
3077c478bd9Sstevel@tonic-gate 	}
3087c478bd9Sstevel@tonic-gate 
3097c478bd9Sstevel@tonic-gate 	*uc = ucp;
3107c478bd9Sstevel@tonic-gate 
3117c478bd9Sstevel@tonic-gate 	return (0);
3127c478bd9Sstevel@tonic-gate }
3137c478bd9Sstevel@tonic-gate 
3147c478bd9Sstevel@tonic-gate int
3157c478bd9Sstevel@tonic-gate door_cred(door_cred_t *dc)
3167c478bd9Sstevel@tonic-gate {
3177c478bd9Sstevel@tonic-gate 	/*
3187c478bd9Sstevel@tonic-gate 	 * Ucred size is small and alloca is fast
3197c478bd9Sstevel@tonic-gate 	 * and cannot fail.
3207c478bd9Sstevel@tonic-gate 	 */
3217c478bd9Sstevel@tonic-gate 	ucred_t *ucp = alloca(ucred_size());
3227c478bd9Sstevel@tonic-gate 	int ret;
3237c478bd9Sstevel@tonic-gate 
3247c478bd9Sstevel@tonic-gate 	if ((ret = __door_ucred(ucp)) == 0) {
3257c478bd9Sstevel@tonic-gate 		dc->dc_euid = ucred_geteuid(ucp);
3267c478bd9Sstevel@tonic-gate 		dc->dc_ruid = ucred_getruid(ucp);
3277c478bd9Sstevel@tonic-gate 		dc->dc_egid = ucred_getegid(ucp);
3287c478bd9Sstevel@tonic-gate 		dc->dc_rgid = ucred_getrgid(ucp);
3297c478bd9Sstevel@tonic-gate 		dc->dc_pid = ucred_getpid(ucp);
3307c478bd9Sstevel@tonic-gate 	}
3317c478bd9Sstevel@tonic-gate 	return (ret);
3327c478bd9Sstevel@tonic-gate }
3337c478bd9Sstevel@tonic-gate 
3347c478bd9Sstevel@tonic-gate int
335*49b225e1SGavin Maltby door_unbind(void)
336*49b225e1SGavin Maltby {
337*49b225e1SGavin Maltby 	struct privdoor_data *pdd;
338*49b225e1SGavin Maltby 	int rv = __door_unbind();
339*49b225e1SGavin Maltby 
340*49b225e1SGavin Maltby 	/*
341*49b225e1SGavin Maltby 	 * If we were indeed bound to the door then check to see whether
342*49b225e1SGavin Maltby 	 * we are part of a door_xcreate'd door by checking for our TSD.
343*49b225e1SGavin Maltby 	 * If so, then clear the TSD for this key to avoid destructor
344*49b225e1SGavin Maltby 	 * callback on future thread exit, and release the private door data.
345*49b225e1SGavin Maltby 	 */
346*49b225e1SGavin Maltby 	if (rv == 0 && (pdd = pthread_getspecific(privdoor_key)) != NULL) {
347*49b225e1SGavin Maltby 		(void) pthread_setspecific(privdoor_key, NULL);
348*49b225e1SGavin Maltby 		privdoor_data_rele(pdd);
349*49b225e1SGavin Maltby 	}
350*49b225e1SGavin Maltby 
351*49b225e1SGavin Maltby 	return (rv);
352*49b225e1SGavin Maltby }
353*49b225e1SGavin Maltby 
/*
 * door_return(3C): return results to the door client, and make the
 * calling thread available to service the next invocation.  Most of
 * the work here is computing a stack pointer and remaining-size pair
 * to hand to the kernel for the next request.
 */
int
door_return(char *data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t num_desc)
{
	caddr_t sp;
	size_t ssize;
	size_t reserve;
	ulwp_t *self = curthread;

	{
		/* Discover this thread's stack base and total size. */
		stack_t s;
		if (thr_stksegment(&s) != 0) {
			errno = EINVAL;
			return (-1);
		}
		sp = s.ss_sp;
		ssize = s.ss_size;
	}

	if (!self->ul_door_noreserve) {
		/*
		 * When we return from the kernel, we must have enough stack
		 * available to handle the request.  Since the creator of
		 * the thread has control over its stack size, and larger
		 * stacks generally indicate bigger request queues, we
		 * use the heuristic of reserving 1/32nd of the stack size
		 * (up to the default stack size), with a minimum of 1/8th
		 * of MINSTACK.  Currently, this translates to:
		 *
		 *			_ILP32		_LP64
		 *	min resv	 512 bytes	1024 bytes
		 *	max resv	 32k bytes	 64k bytes
		 *
		 * This reservation can be disabled by setting
		 *	_THREAD_DOOR_NORESERVE=1
		 * in the environment, but shouldn't be.
		 */

#define	STACK_FRACTION		32
#define	MINSTACK_FRACTION	8

		if (ssize < (MINSTACK * (STACK_FRACTION/MINSTACK_FRACTION)))
			reserve = MINSTACK / MINSTACK_FRACTION;
		else if (ssize < DEFAULTSTACK)
			reserve = ssize / STACK_FRACTION;
		else
			reserve = DEFAULTSTACK / STACK_FRACTION;

#undef STACK_FRACTION
#undef MINSTACK_FRACTION

		if (ssize > reserve)
			ssize -= reserve;
		else
			ssize = 0;
	}

	/*
	 * Historically, the __door_return() syscall wrapper subtracted
	 * some "slop" from the stack pointer before trapping into the
	 * kernel.  We now do this here, so that ssize can be adjusted
	 * correctly.  Eventually, this should be removed, since it is
	 * unnecessary.  (note that TNF on x86 currently relies upon this
	 * idiocy)
	 */
#if defined(__sparc)
	reserve = SA(MINFRAME);
#elif defined(__x86)
	reserve = SA(512);
#else
#error need to define stack base reserve
#endif

#ifdef _STACK_GROWS_DOWNWARD
	sp -= reserve;
#else
#error stack does not grow downwards, routine needs update
#endif

	/* Account for the slop just subtracted from the stack pointer. */
	if (ssize > reserve)
		ssize -= reserve;
	else
		ssize = 0;

	/*
	 * Normally, the above will leave plenty of space in sp for a
	 * request.  Just in case some bozo overrides thr_stksegment() to
	 * return an uncommonly small stack size, we turn off stack size
	 * checking if there is less than 1k remaining.
	 */
#define	MIN_DOOR_STACK	1024
	if (ssize < MIN_DOOR_STACK)
		ssize = 0;

#undef MIN_DOOR_STACK

	/*
	 * We have to wrap the desc_* arguments for the syscall.  If there are
	 * no descriptors being returned, we can skip the wrapping.
	 */
	if (num_desc != 0) {
		door_return_desc_t d;

		d.desc_ptr = desc_ptr;
		d.desc_num = num_desc;
		return (__door_return(data_ptr, data_size, &d, sp, ssize));
	}
	return (__door_return(data_ptr, data_size, NULL, sp, ssize));
}
4637c478bd9Sstevel@tonic-gate 
4647c478bd9Sstevel@tonic-gate /*
465*49b225e1SGavin Maltby  * To start and synchronize a number of door service threads at once
466*49b225e1SGavin Maltby  * we use a struct door_xsync_shared shared by all threads, and
467*49b225e1SGavin Maltby  * a struct door_xsync for each thread.  While each thread
468*49b225e1SGavin Maltby  * has its own startup state, all such state are protected by the same
469*49b225e1SGavin Maltby  * shared lock.  This could cause a little contention but it is a one-off
470*49b225e1SGavin Maltby  * cost at door creation.
471*49b225e1SGavin Maltby  */
enum door_xsync_state {
	DOOR_XSYNC_CREATEWAIT = 0x1c8c8c80,	/* awaits creation handshake */
	DOOR_XSYNC_ABORT,		/* aborting door_xcreate */
	DOOR_XSYNC_ABORTED,		/* thread heeded abort request */
	DOOR_XSYNC_MAXCONCUR,		/* create func decided no more */
	DOOR_XSYNC_CREATEFAIL,		/* thr_create/pthread_create failure */
	DOOR_XSYNC_SETSPEC_FAIL,	/* setspecific failed */
	DOOR_XSYNC_BINDFAIL,		/* door_bind failed */
	DOOR_XSYNC_BOUND,		/* door_bind succeeded */
	DOOR_XSYNC_ENTER_SERVICE	/* Go on to door_return */
};

/* These stats are incremented non-atomically - indicative only */
uint64_t door_xcreate_n_stats[DOOR_XSYNC_ENTER_SERVICE -
    DOOR_XSYNC_CREATEWAIT + 1];

/* State shared by door_xcreate_n and all threads it starts for one door. */
struct door_xsync_shared {
	pthread_mutex_t lock;		/* protects all per-thread state */
	pthread_cond_t cv_m2s;		/* master-to-server wakeup */
	pthread_cond_t cv_s2m;		/* server-to-master wakeup */
	struct privdoor_data *pdd;	/* door the new threads will bind to */
	volatile uint32_t waiting;	/* servers yet to complete this phase */
};

/* Per-server-thread handshake state, pointing back at the shared state. */
struct door_xsync {
	volatile enum door_xsync_state state;
	struct door_xsync_shared *sharedp;
};
500*49b225e1SGavin Maltby 
501*49b225e1SGavin Maltby /*
502*49b225e1SGavin Maltby  * Thread start function that xcreated private doors must use in
503*49b225e1SGavin Maltby  * thr_create or pthread_create.  They must also use the argument we
504*49b225e1SGavin Maltby  * provide.  We:
505*49b225e1SGavin Maltby  *
506*49b225e1SGavin Maltby  *	o call a thread setup function if supplied, or apply sensible defaults
507*49b225e1SGavin Maltby  *	o bind the newly-created thread to the door it will service
508*49b225e1SGavin Maltby  *	o synchronize with door_xcreate to indicate that we have successfully
509*49b225e1SGavin Maltby  *	  bound to the door;  door_xcreate will not return until all
510*49b225e1SGavin Maltby  *	  requested threads have at least bound
511*49b225e1SGavin Maltby  *	o enter service with door_return quoting magic sentinel args
512*49b225e1SGavin Maltby  */
void *
door_xcreate_startf(void *arg)
{
	struct door_xsync *xsp = (struct door_xsync *)arg;
	struct door_xsync_shared *xssp = xsp->sharedp;
	struct privdoor_data *pdd = xssp->pdd;
	enum door_xsync_state next_state;

	/* This thread's hold on the door data; released via key destructor. */
	privdoor_data_hold(pdd);
	if (pthread_setspecific(privdoor_key, (const void *)pdd) != 0) {
		next_state = DOOR_XSYNC_SETSPEC_FAIL;
		privdoor_data_rele(pdd);
		goto handshake;
	}

	/* Caller-supplied setup, or the documented default thread config. */
	if (pdd->pd_setupf != NULL) {
		(pdd->pd_setupf)(pdd->pd_crcookie);
	} else {
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	if (door_bind(pdd->pd_dfd) == 0)
		next_state = DOOR_XSYNC_BOUND;
	else
		next_state = DOOR_XSYNC_BINDFAIL;

handshake:
	/*
	 * Phase 1: report our outcome to door_xcreate_n and wake it
	 * once the last of the batch has reported.
	 */
	(void) pthread_mutex_lock(&xssp->lock);

	ASSERT(xsp->state == DOOR_XSYNC_CREATEWAIT ||
	    xsp->state == DOOR_XSYNC_ABORT);

	if (xsp->state == DOOR_XSYNC_ABORT)
		next_state = DOOR_XSYNC_ABORTED;

	xsp->state = next_state;

	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	if (next_state != DOOR_XSYNC_BOUND) {
		(void) pthread_mutex_unlock(&xssp->lock);
		return (NULL);	/* thread exits, key destructor called */
	}

	/* Phase 2: wait for the master's go/abort decision. */
	while (xsp->state == DOOR_XSYNC_BOUND)
		(void) pthread_cond_wait(&xssp->cv_m2s, &xssp->lock);

	next_state = xsp->state;
	ASSERT(next_state == DOOR_XSYNC_ENTER_SERVICE ||
	    next_state == DOOR_XSYNC_ABORT);

	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	(void) pthread_mutex_unlock(&xssp->lock); /* xssp/xsp can be freed */

	if (next_state == DOOR_XSYNC_ABORT)
		return (NULL);	/* thread exits, key destructor called */

	/* Sentinel args: enter service awaiting the next door invocation. */
	(void) door_return(NULL, 0, NULL, 0);
	return (NULL);
}
577*49b225e1SGavin Maltby 
578*49b225e1SGavin Maltby static int
579*49b225e1SGavin Maltby door_xcreate_n(door_info_t *dip, struct privdoor_data *pdd, int n)
580*49b225e1SGavin Maltby {
581*49b225e1SGavin Maltby 	struct door_xsync_shared *xssp;
582*49b225e1SGavin Maltby 	struct door_xsync *xsp;
583*49b225e1SGavin Maltby 	int i, failidx = -1;
584*49b225e1SGavin Maltby 	int isdepcb = 0;
585*49b225e1SGavin Maltby 	int failerrno;
586*49b225e1SGavin Maltby 	int bound = 0;
587*49b225e1SGavin Maltby #ifdef _STACK_GROWS_DOWNWARD
588*49b225e1SGavin Maltby 	int stkdir = -1;
589*49b225e1SGavin Maltby #else
590*49b225e1SGavin Maltby 	int stkdir = 1;
591*49b225e1SGavin Maltby #endif
592*49b225e1SGavin Maltby 	int rv = 0;
593*49b225e1SGavin Maltby 
594*49b225e1SGavin Maltby 	/*
595*49b225e1SGavin Maltby 	 * If we're called during door creation then we have the
596*49b225e1SGavin Maltby 	 * privdoor_data.  If we're called as part of a depletion callback
597*49b225e1SGavin Maltby 	 * then the current thread has the privdoor_data as TSD.
598*49b225e1SGavin Maltby 	 */
599*49b225e1SGavin Maltby 	if (pdd == NULL) {
600*49b225e1SGavin Maltby 		isdepcb = 1;
601*49b225e1SGavin Maltby 		if ((pdd = pthread_getspecific(privdoor_key)) == NULL)
602*49b225e1SGavin Maltby 			thr_panic("door_xcreate_n - no privdoor_data "
603*49b225e1SGavin Maltby 			    "on existing server thread");
604*49b225e1SGavin Maltby 	}
605*49b225e1SGavin Maltby 
606*49b225e1SGavin Maltby 	/*
607*49b225e1SGavin Maltby 	 * Allocate on our stack.  We'll pass pointers to this to the
608*49b225e1SGavin Maltby 	 * newly-created threads, therefore this function must not return until
609*49b225e1SGavin Maltby 	 * we have synced with server threads that are created.
610*49b225e1SGavin Maltby 	 * We do not limit the number of threads so begin by checking
611*49b225e1SGavin Maltby 	 * that we have space on the stack for this.
612*49b225e1SGavin Maltby 	 */
613*49b225e1SGavin Maltby 	{
614*49b225e1SGavin Maltby 		size_t sz = sizeof (*xssp) + n * sizeof (*xsp) + 32;
615*49b225e1SGavin Maltby 		char dummy;
616*49b225e1SGavin Maltby 
617*49b225e1SGavin Maltby 		if (!stack_inbounds(&dummy + stkdir * sz)) {
618*49b225e1SGavin Maltby 			errno = E2BIG;
619*49b225e1SGavin Maltby 			return (0);
620*49b225e1SGavin Maltby 		}
621*49b225e1SGavin Maltby 	}
622*49b225e1SGavin Maltby 
623*49b225e1SGavin Maltby 	if ((xssp = alloca(sizeof (*xssp))) == NULL ||
624*49b225e1SGavin Maltby 	    (xsp = alloca(n * sizeof (*xsp))) == NULL) {
625*49b225e1SGavin Maltby 		errno = E2BIG;
626*49b225e1SGavin Maltby 		return (0);
627*49b225e1SGavin Maltby 	}
628*49b225e1SGavin Maltby 
629*49b225e1SGavin Maltby 	(void) pthread_mutex_init(&xssp->lock, NULL);
630*49b225e1SGavin Maltby 	(void) pthread_cond_init(&xssp->cv_m2s, NULL);
631*49b225e1SGavin Maltby 	(void) pthread_cond_init(&xssp->cv_s2m, NULL);
632*49b225e1SGavin Maltby 	xssp->pdd = pdd;
633*49b225e1SGavin Maltby 	xssp->waiting = 0;
634*49b225e1SGavin Maltby 
635*49b225e1SGavin Maltby 	(void) pthread_mutex_lock(&xssp->lock);
636*49b225e1SGavin Maltby 
637*49b225e1SGavin Maltby 	for (i = 0; failidx == -1 && i < n; i++) {
638*49b225e1SGavin Maltby 		xsp[i].sharedp = xssp;
639*49b225e1SGavin Maltby 		membar_producer();	/* xssp and xsp[i] for new thread */
640*49b225e1SGavin Maltby 
641*49b225e1SGavin Maltby 		switch ((pdd->pd_crf)(dip, door_xcreate_startf,
642*49b225e1SGavin Maltby 		    (void *)&xsp[i], pdd->pd_crcookie)) {
643*49b225e1SGavin Maltby 		case 1:
644*49b225e1SGavin Maltby 			/*
645*49b225e1SGavin Maltby 			 * Thread successfully created.  Set mailbox
646*49b225e1SGavin Maltby 			 * state and increment the number we have to
647*49b225e1SGavin Maltby 			 * sync with.
648*49b225e1SGavin Maltby 			 */
649*49b225e1SGavin Maltby 			xsp[i].state = DOOR_XSYNC_CREATEWAIT;
650*49b225e1SGavin Maltby 			xssp->waiting++;
651*49b225e1SGavin Maltby 			break;
652*49b225e1SGavin Maltby 		case 0:
653*49b225e1SGavin Maltby 			/*
654*49b225e1SGavin Maltby 			 * Elected to create no further threads.  OK for
655*49b225e1SGavin Maltby 			 * a depletion callback, but not during door_xcreate.
656*49b225e1SGavin Maltby 			 */
657*49b225e1SGavin Maltby 			xsp[i].state = DOOR_XSYNC_MAXCONCUR;
658*49b225e1SGavin Maltby 			if (!isdepcb) {
659*49b225e1SGavin Maltby 				failidx = i;
660*49b225e1SGavin Maltby 				failerrno = EINVAL;
661*49b225e1SGavin Maltby 			}
662*49b225e1SGavin Maltby 			break;
663*49b225e1SGavin Maltby 		case -1:
664*49b225e1SGavin Maltby 			/*
665*49b225e1SGavin Maltby 			 * Thread creation was attempted but failed.
666*49b225e1SGavin Maltby 			 */
667*49b225e1SGavin Maltby 			xsp[i].state = DOOR_XSYNC_CREATEFAIL;
668*49b225e1SGavin Maltby 			failidx = i;
669*49b225e1SGavin Maltby 			failerrno = EPIPE;
670*49b225e1SGavin Maltby 			break;
671*49b225e1SGavin Maltby 		default:
672*49b225e1SGavin Maltby 			/*
673*49b225e1SGavin Maltby 			 * The application-supplied function did not return
674*49b225e1SGavin Maltby 			 * -1/0/1 - best we can do is panic because anything
675*49b225e1SGavin Maltby 			 * else is harder to debug.
676*49b225e1SGavin Maltby 			 */
677*49b225e1SGavin Maltby 			thr_panic("door server create function illegal return");
678*49b225e1SGavin Maltby 			/*NOTREACHED*/
679*49b225e1SGavin Maltby 		}
680*49b225e1SGavin Maltby 	}
681*49b225e1SGavin Maltby 
682*49b225e1SGavin Maltby 	/*
683*49b225e1SGavin Maltby 	 * On initial creation all must succeed; if not then abort
684*49b225e1SGavin Maltby 	 */
685*49b225e1SGavin Maltby 	if (!isdepcb && failidx != -1) {
686*49b225e1SGavin Maltby 		for (i = 0; i < failidx; i++)
687*49b225e1SGavin Maltby 			if (xsp[i].state == DOOR_XSYNC_CREATEWAIT)
688*49b225e1SGavin Maltby 				xsp[i].state = DOOR_XSYNC_ABORT;
689*49b225e1SGavin Maltby 	}
690*49b225e1SGavin Maltby 
691*49b225e1SGavin Maltby 	/*
692*49b225e1SGavin Maltby 	 * Wait for thread startup handshake to complete for all threads
693*49b225e1SGavin Maltby 	 */
694*49b225e1SGavin Maltby 	while (xssp->waiting)
695*49b225e1SGavin Maltby 		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);
696*49b225e1SGavin Maltby 
697*49b225e1SGavin Maltby 	/*
698*49b225e1SGavin Maltby 	 * If we are aborting for a failed thread create in door_xcreate
699*49b225e1SGavin Maltby 	 * then we're done.
700*49b225e1SGavin Maltby 	 */
701*49b225e1SGavin Maltby 	if (!isdepcb && failidx != -1) {
702*49b225e1SGavin Maltby 		rv = 0;
703*49b225e1SGavin Maltby 		goto out;	/* lock held, failerrno is set */
704*49b225e1SGavin Maltby 	}
705*49b225e1SGavin Maltby 
706*49b225e1SGavin Maltby 	/*
707*49b225e1SGavin Maltby 	 * Did we all succeed in binding?
708*49b225e1SGavin Maltby 	 */
709*49b225e1SGavin Maltby 	for (i = 0; i < n; i++) {
710*49b225e1SGavin Maltby 		int statidx = xsp[i].state - DOOR_XSYNC_CREATEWAIT;
711*49b225e1SGavin Maltby 
712*49b225e1SGavin Maltby 		door_xcreate_n_stats[statidx]++;
713*49b225e1SGavin Maltby 		if (xsp[i].state == DOOR_XSYNC_BOUND)
714*49b225e1SGavin Maltby 			bound++;
715*49b225e1SGavin Maltby 	}
716*49b225e1SGavin Maltby 
717*49b225e1SGavin Maltby 	if (bound == n) {
718*49b225e1SGavin Maltby 		rv = 1;
719*49b225e1SGavin Maltby 	} else {
720*49b225e1SGavin Maltby 		failerrno = EBADF;
721*49b225e1SGavin Maltby 		rv = 0;
722*49b225e1SGavin Maltby 	}
723*49b225e1SGavin Maltby 
724*49b225e1SGavin Maltby 	/*
725*49b225e1SGavin Maltby 	 * During door_xcreate all must succeed in binding - if not then
726*49b225e1SGavin Maltby 	 * we command even those that did bind to abort.  Threads that
727*49b225e1SGavin Maltby 	 * did not get as far as binding have already exited.
728*49b225e1SGavin Maltby 	 */
729*49b225e1SGavin Maltby 	for (i = 0; i < n; i++) {
730*49b225e1SGavin Maltby 		if (xsp[i].state == DOOR_XSYNC_BOUND) {
731*49b225e1SGavin Maltby 			xsp[i].state = (rv == 1 || isdepcb) ?
732*49b225e1SGavin Maltby 			    DOOR_XSYNC_ENTER_SERVICE : DOOR_XSYNC_ABORT;
733*49b225e1SGavin Maltby 			xssp->waiting++;
734*49b225e1SGavin Maltby 		}
735*49b225e1SGavin Maltby 	}
736*49b225e1SGavin Maltby 
737*49b225e1SGavin Maltby 	(void) pthread_cond_broadcast(&xssp->cv_m2s);
738*49b225e1SGavin Maltby 
739*49b225e1SGavin Maltby 	while (xssp->waiting)
740*49b225e1SGavin Maltby 		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);
741*49b225e1SGavin Maltby 
742*49b225e1SGavin Maltby out:
743*49b225e1SGavin Maltby 	(void) pthread_mutex_unlock(&xssp->lock);
744*49b225e1SGavin Maltby 	(void) pthread_mutex_destroy(&xssp->lock);
745*49b225e1SGavin Maltby 	(void) pthread_cond_destroy(&xssp->cv_m2s);
746*49b225e1SGavin Maltby 	(void) pthread_cond_destroy(&xssp->cv_s2m);
747*49b225e1SGavin Maltby 
748*49b225e1SGavin Maltby 	if (rv == 0)
749*49b225e1SGavin Maltby 		errno = failerrno;
750*49b225e1SGavin Maltby 
751*49b225e1SGavin Maltby 	return (rv);
752*49b225e1SGavin Maltby }
753*49b225e1SGavin Maltby 
754*49b225e1SGavin Maltby /*
755*49b225e1SGavin Maltby  * Call the server creation function to give it the opportunity to
756*49b225e1SGavin Maltby  * create more threads.  Called during a door invocation when we
757*49b225e1SGavin Maltby  * return from door_return(NULL,0, NULL, 0) and notice that we're
758*49b225e1SGavin Maltby  * running on the last available thread.
759*49b225e1SGavin Maltby  */
760*49b225e1SGavin Maltby void
761*49b225e1SGavin Maltby door_depletion_cb(door_info_t *dip)
762*49b225e1SGavin Maltby {
763*49b225e1SGavin Maltby 	if (dip == NULL) {
764*49b225e1SGavin Maltby 		/*
765*49b225e1SGavin Maltby 		 * Non-private doors always use door_server_func.
766*49b225e1SGavin Maltby 		 */
767*49b225e1SGavin Maltby 		(*door_server_func)(NULL);
768*49b225e1SGavin Maltby 		return;
769*49b225e1SGavin Maltby 	}
770*49b225e1SGavin Maltby 
771*49b225e1SGavin Maltby 	if (dip->di_attributes & DOOR_NO_DEPLETION_CB) {
772*49b225e1SGavin Maltby 		/*
773*49b225e1SGavin Maltby 		 * Private, door_xcreate'd door specified no callbacks.
774*49b225e1SGavin Maltby 		 */
775*49b225e1SGavin Maltby 		return;
776*49b225e1SGavin Maltby 	} else if (!(dip->di_attributes & DOOR_PRIVCREATE)) {
777*49b225e1SGavin Maltby 		/*
778*49b225e1SGavin Maltby 		 * Private door with standard/legacy creation semantics.
779*49b225e1SGavin Maltby 		 */
780*49b225e1SGavin Maltby 		dip->di_attributes |= DOOR_DEPLETION_CB;
781*49b225e1SGavin Maltby 		(*door_server_func)(dip);
782*49b225e1SGavin Maltby 		return;
783*49b225e1SGavin Maltby 	} else {
784*49b225e1SGavin Maltby 		/*
785*49b225e1SGavin Maltby 		 * Private, door_xcreate'd door.
786*49b225e1SGavin Maltby 		 */
787*49b225e1SGavin Maltby 		dip->di_attributes |= DOOR_DEPLETION_CB;
788*49b225e1SGavin Maltby 		(void) door_xcreate_n(dip, NULL, 1);
789*49b225e1SGavin Maltby 	}
790*49b225e1SGavin Maltby }
791*49b225e1SGavin Maltby 
792*49b225e1SGavin Maltby /*
793*49b225e1SGavin Maltby  * Install a new server creation function.  The appointed function
794*49b225e1SGavin Maltby  * will receieve depletion callbacks for non-private doors and private
795*49b225e1SGavin Maltby  * doors created with door_create(..., DOOR_PRIVATE).
7967c478bd9Sstevel@tonic-gate  */
7977c478bd9Sstevel@tonic-gate door_server_func_t *
7987c478bd9Sstevel@tonic-gate door_server_create(door_server_func_t *create_func)
7997c478bd9Sstevel@tonic-gate {
8007c478bd9Sstevel@tonic-gate 	door_server_func_t *prev;
8017c478bd9Sstevel@tonic-gate 
8027c478bd9Sstevel@tonic-gate 	lmutex_lock(&door_state_lock);
8037c478bd9Sstevel@tonic-gate 	prev = door_server_func;
8047c478bd9Sstevel@tonic-gate 	door_server_func = create_func;
8057c478bd9Sstevel@tonic-gate 	lmutex_unlock(&door_state_lock);
8067c478bd9Sstevel@tonic-gate 
8077c478bd9Sstevel@tonic-gate 	return (prev);
8087c478bd9Sstevel@tonic-gate }
8097c478bd9Sstevel@tonic-gate 
8107c478bd9Sstevel@tonic-gate /*
811*49b225e1SGavin Maltby  * Thread start function for door_create_server() below.
8127c478bd9Sstevel@tonic-gate  * Create door server threads with cancellation(5) disabled.
8137c478bd9Sstevel@tonic-gate  */
8147c478bd9Sstevel@tonic-gate static void *
8157c478bd9Sstevel@tonic-gate door_create_func(void *arg)
8167c478bd9Sstevel@tonic-gate {
8177c478bd9Sstevel@tonic-gate 	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
8187c478bd9Sstevel@tonic-gate 	(void) door_return(NULL, 0, NULL, 0);
8197c478bd9Sstevel@tonic-gate 
8207c478bd9Sstevel@tonic-gate 	return (arg);
8217c478bd9Sstevel@tonic-gate }
8227c478bd9Sstevel@tonic-gate 
8237c478bd9Sstevel@tonic-gate /*
824*49b225e1SGavin Maltby  * The default door_server_func_t.
8257c478bd9Sstevel@tonic-gate  */
8267c478bd9Sstevel@tonic-gate /* ARGSUSED */
8277c478bd9Sstevel@tonic-gate static void
8287c478bd9Sstevel@tonic-gate door_create_server(door_info_t *dip)
8297c478bd9Sstevel@tonic-gate {
8307c478bd9Sstevel@tonic-gate 	(void) thr_create(NULL, 0, door_create_func, NULL, THR_DETACHED, NULL);
8317c478bd9Sstevel@tonic-gate 	yield();	/* Gives server thread a chance to run */
8327c478bd9Sstevel@tonic-gate }
833