1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #pragma ident	"%Z%%M%	%I%	%E% SMI"
29 
30 /*
31  * System call I/F to doors (outside of vnodes I/F) and misc support
32  * routines
33  */
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/door.h>
37 #include <sys/door_data.h>
38 #include <sys/proc.h>
39 #include <sys/thread.h>
40 #include <sys/class.h>
41 #include <sys/cred.h>
42 #include <sys/kmem.h>
43 #include <sys/cmn_err.h>
44 #include <sys/stack.h>
45 #include <sys/debug.h>
46 #include <sys/cpuvar.h>
47 #include <sys/file.h>
48 #include <sys/fcntl.h>
49 #include <sys/vnode.h>
50 #include <sys/vfs.h>
51 #include <sys/sobject.h>
52 #include <sys/schedctl.h>
53 #include <sys/callb.h>
54 #include <sys/ucred.h>
55 
56 #include <sys/mman.h>
57 #include <sys/sysmacros.h>
58 #include <sys/vmsystm.h>
59 #include <vm/as.h>
60 #include <vm/hat.h>
61 #include <vm/page.h>
62 #include <vm/seg.h>
#include <vm/seg_vn.h>
65 
66 #include <sys/modctl.h>
67 #include <sys/syscall.h>
68 #include <sys/pathname.h>
69 #include <sys/rctl.h>
70 
71 /*
 * The maximum amount of data (in bytes) that will be transferred using
73  * an intermediate kernel buffer.  For sizes greater than this we map
74  * in the destination pages and perform a 1-copy transfer.
75  */
76 size_t	door_max_arg = 16 * 1024;
77 
78 /*
 * Maximum amount of data that will be transferred in a reply to a
80  * door_upcall.  Need to guard against a process returning huge amounts
81  * of data and getting the kernel stuck in kmem_alloc.
82  */
83 size_t	door_max_upcall_reply = 1024 * 1024;
84 
85 /*
86  * Maximum number of descriptors allowed to be passed in a single
87  * door_call or door_return.  We need to allocate kernel memory
88  * for all of them at once, so we can't let it scale without limit.
89  */
90 uint_t door_max_desc = 1024;
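
/*
 * All three limits above are simple global tunables of this module.  As an
 * illustrative, untested example (assuming the module loads as "doorfs"),
 * an administrator could raise the intermediate-buffer cutoff from
 * /etc/system with a line such as:
 *
 *	set doorfs:door_max_arg = 0x8000
 */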
91 
92 /*
93  * Definition of a door handle, used by other kernel subsystems when
94  * calling door functions.  This is really a file structure but we
95  * want to hide that fact.
96  */
97 struct __door_handle {
98 	file_t dh_file;
99 };
100 
101 #define	DHTOF(dh) ((file_t *)(dh))
102 #define	FTODH(fp) ((door_handle_t)(fp))
103 
104 static int doorfs(long, long, long, long, long, long);
105 
106 static struct sysent door_sysent = {
107 	6,
108 	SE_ARGC | SE_NOUNLOAD,
109 	(int (*)())doorfs,
110 };
111 
112 static struct modlsys modlsys = {
113 	&mod_syscallops, "doors", &door_sysent
114 };
115 
116 #ifdef _SYSCALL32_IMPL
117 
118 static int
119 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
120     int32_t arg5, int32_t subcode);
121 
122 static struct sysent door_sysent32 = {
123 	6,
124 	SE_ARGC | SE_NOUNLOAD,
125 	(int (*)())doorfs32,
126 };
127 
128 static struct modlsys modlsys32 = {
129 	&mod_syscallops32,
130 	"32-bit door syscalls",
131 	&door_sysent32
132 };
133 #endif
134 
135 static struct modlinkage modlinkage = {
136 	MODREV_1,
137 	&modlsys,
138 #ifdef _SYSCALL32_IMPL
139 	&modlsys32,
140 #endif
141 	NULL
142 };
143 
144 dev_t	doordev;
145 
146 extern	struct vfs door_vfs;
147 extern	struct vnodeops *door_vnodeops;
148 
149 int
150 _init(void)
151 {
152 	static const fs_operation_def_t door_vfsops_template[] = {
153 		NULL, NULL
154 	};
155 	extern const fs_operation_def_t door_vnodeops_template[];
156 	vfsops_t *door_vfsops;
157 	major_t major;
158 	int error;
159 
160 	mutex_init(&door_knob, NULL, MUTEX_DEFAULT, NULL);
161 	if ((major = getudev()) == (major_t)-1)
162 		return (ENXIO);
163 	doordev = makedevice(major, 0);
164 
165 	/* Create a dummy vfs */
166 	error = vfs_makefsops(door_vfsops_template, &door_vfsops);
167 	if (error != 0) {
168 		cmn_err(CE_WARN, "door init: bad vfs ops");
169 		return (error);
170 	}
171 	vfs_setops(&door_vfs, door_vfsops);
172 	door_vfs.vfs_flag = VFS_RDONLY;
173 	door_vfs.vfs_dev = doordev;
174 	vfs_make_fsid(&(door_vfs.vfs_fsid), doordev, 0);
175 
176 	error = vn_make_ops("doorfs", door_vnodeops_template, &door_vnodeops);
177 	if (error != 0) {
178 		vfs_freevfsops(door_vfsops);
179 		cmn_err(CE_WARN, "door init: bad vnode ops");
180 		return (error);
181 	}
182 	return (mod_install(&modlinkage));
183 }
184 
185 int
186 _info(struct modinfo *modinfop)
187 {
188 	return (mod_info(&modlinkage, modinfop));
189 }
190 
191 /* system call functions */
192 static int door_call(int, void *);
193 static int door_return(caddr_t, size_t, door_desc_t *, uint_t, caddr_t, size_t);
194 static int door_create(void (*pc_cookie)(void *, char *, size_t, door_desc_t *,
195     uint_t), void *data_cookie, uint_t);
196 static int door_revoke(int);
197 static int door_info(int, struct door_info *);
198 static int door_ucred(struct ucred_s *);
199 static int door_bind(int);
200 static int door_unbind(void);
201 static int door_unref(void);
202 static int door_getparam(int, int, size_t *);
203 static int door_setparam(int, int, size_t);
204 
205 #define	DOOR_RETURN_OLD	4		/* historic value, for s10 */
206 
207 /*
208  * System call wrapper for all door related system calls
209  */
210 static int
211 doorfs(long arg1, long arg2, long arg3, long arg4, long arg5, long subcode)
212 {
213 	switch (subcode) {
214 	case DOOR_CALL:
215 		return (door_call(arg1, (void *)arg2));
216 	case DOOR_RETURN: {
217 		door_return_desc_t *drdp = (door_return_desc_t *)arg3;
218 
219 		if (drdp != NULL) {
220 			door_return_desc_t drd;
221 			if (copyin(drdp, &drd, sizeof (drd)))
222 				return (EFAULT);
223 			return (door_return((caddr_t)arg1, arg2, drd.desc_ptr,
224 			    drd.desc_num, (caddr_t)arg4, arg5));
225 		}
226 		return (door_return((caddr_t)arg1, arg2, NULL,
227 		    0, (caddr_t)arg4, arg5));
228 	}
229 	case DOOR_RETURN_OLD:
230 		/*
231 		 * In order to support the S10 runtime environment, we
232 		 * still respond to the old syscall subcode for door_return.
233 		 * We treat it as having no stack limits.  This code should
234 		 * be removed when such support is no longer needed.
235 		 */
236 		return (door_return((caddr_t)arg1, arg2, (door_desc_t *)arg3,
237 		    arg4, (caddr_t)arg5, 0));
238 	case DOOR_CREATE:
239 		return (door_create((void (*)())arg1, (void *)arg2, arg3));
240 	case DOOR_REVOKE:
241 		return (door_revoke(arg1));
242 	case DOOR_INFO:
243 		return (door_info(arg1, (struct door_info *)arg2));
244 	case DOOR_BIND:
245 		return (door_bind(arg1));
246 	case DOOR_UNBIND:
247 		return (door_unbind());
248 	case DOOR_UNREFSYS:
249 		return (door_unref());
250 	case DOOR_UCRED:
251 		return (door_ucred((struct ucred_s *)arg1));
252 	case DOOR_GETPARAM:
253 		return (door_getparam(arg1, arg2, (size_t *)arg3));
254 	case DOOR_SETPARAM:
255 		return (door_setparam(arg1, arg2, arg3));
256 	default:
257 		return (set_errno(EINVAL));
258 	}
259 }
260 
261 #ifdef _SYSCALL32_IMPL
262 /*
263  * System call wrapper for all door related system calls from 32-bit programs.
264  * Needed at the moment because of the casts - they undo some damage
265  * that truss causes (sign-extending the stack pointer) when truss'ing
266  * a 32-bit program using doors.
267  */
268 static int
269 doorfs32(int32_t arg1, int32_t arg2, int32_t arg3,
270     int32_t arg4, int32_t arg5, int32_t subcode)
271 {
272 	switch (subcode) {
273 	case DOOR_CALL:
274 		return (door_call(arg1, (void *)(uintptr_t)(caddr32_t)arg2));
275 	case DOOR_RETURN: {
276 		door_return_desc32_t *drdp =
277 		    (door_return_desc32_t *)(uintptr_t)(caddr32_t)arg3;
278 		if (drdp != NULL) {
279 			door_return_desc32_t drd;
280 			if (copyin(drdp, &drd, sizeof (drd)))
281 				return (EFAULT);
282 			return (door_return(
283 			    (caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
284 			    (door_desc_t *)(uintptr_t)drd.desc_ptr,
285 			    drd.desc_num, (caddr_t)(uintptr_t)(caddr32_t)arg4,
286 			    (size_t)(uintptr_t)(size32_t)arg5));
287 		}
288 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1,
289 		    arg2, NULL, 0, (caddr_t)(uintptr_t)(caddr32_t)arg4,
290 		    (size_t)(uintptr_t)(size32_t)arg5));
291 	}
292 	case DOOR_RETURN_OLD:
293 		/*
294 		 * In order to support the S10 runtime environment, we
295 		 * still respond to the old syscall subcode for door_return.
296 		 * We treat it as having no stack limits.  This code should
297 		 * be removed when such support is no longer needed.
298 		 */
299 		return (door_return((caddr_t)(uintptr_t)(caddr32_t)arg1, arg2,
300 		    (door_desc_t *)(uintptr_t)(caddr32_t)arg3, arg4,
301 		    (caddr_t)(uintptr_t)(caddr32_t)arg5, 0));
302 	case DOOR_CREATE:
303 		return (door_create((void (*)())(uintptr_t)(caddr32_t)arg1,
304 		    (void *)(uintptr_t)(caddr32_t)arg2, arg3));
305 	case DOOR_REVOKE:
306 		return (door_revoke(arg1));
307 	case DOOR_INFO:
308 		return (door_info(arg1,
309 		    (struct door_info *)(uintptr_t)(caddr32_t)arg2));
310 	case DOOR_BIND:
311 		return (door_bind(arg1));
312 	case DOOR_UNBIND:
313 		return (door_unbind());
314 	case DOOR_UNREFSYS:
315 		return (door_unref());
316 	case DOOR_UCRED:
317 		return (door_ucred(
318 		    (struct ucred_s *)(uintptr_t)(caddr32_t)arg1));
319 	case DOOR_GETPARAM:
320 		return (door_getparam(arg1, arg2,
321 		    (size_t *)(uintptr_t)(caddr32_t)arg3));
322 	case DOOR_SETPARAM:
323 		return (door_setparam(arg1, arg2, (size_t)(size32_t)arg3));
324 
325 	default:
326 		return (set_errno(EINVAL));
327 	}
328 }
329 #endif
330 
331 void shuttle_resume(kthread_t *, kmutex_t *);
332 void shuttle_swtch(kmutex_t *);
333 void shuttle_sleep(kthread_t *);
334 
335 /*
336  * Support routines
337  */
338 static int door_create_common(void (*)(), void *, uint_t, int, int *,
339     file_t **);
340 static int door_overflow(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
341 static int door_args(kthread_t *, int);
342 static int door_results(kthread_t *, caddr_t, size_t, door_desc_t *, uint_t);
343 static int door_copy(struct as *, caddr_t, caddr_t, uint_t);
344 static void	door_server_exit(proc_t *, kthread_t *);
345 static void	door_release_server(door_node_t *, kthread_t *);
346 static kthread_t	*door_get_server(door_node_t *);
347 static door_node_t	*door_lookup(int, file_t **);
348 static int	door_translate_in(void);
349 static int	door_translate_out(void);
350 static void	door_fd_rele(door_desc_t *, uint_t, int);
351 static void	door_list_insert(door_node_t *);
352 static void	door_info_common(door_node_t *, door_info_t *, file_t *);
353 static int	door_release_fds(door_desc_t *, uint_t);
354 static void	door_fd_close(door_desc_t *, uint_t);
355 static void	door_fp_close(struct file **, uint_t);
356 
357 static door_data_t *
358 door_my_data(int create_if_missing)
359 {
360 	door_data_t *ddp;
361 
362 	ddp = curthread->t_door;
363 	if (create_if_missing && ddp == NULL)
364 		ddp = curthread->t_door = kmem_zalloc(sizeof (*ddp), KM_SLEEP);
365 
366 	return (ddp);
367 }
368 
369 static door_server_t *
370 door_my_server(int create_if_missing)
371 {
372 	door_data_t *ddp = door_my_data(create_if_missing);
373 
374 	return ((ddp != NULL)? DOOR_SERVER(ddp) : NULL);
375 }
376 
377 static door_client_t *
378 door_my_client(int create_if_missing)
379 {
380 	door_data_t *ddp = door_my_data(create_if_missing);
381 
382 	return ((ddp != NULL)? DOOR_CLIENT(ddp) : NULL);
383 }
384 
385 /*
386  * System call to create a door
387  */
388 int
389 door_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes)
390 {
391 	int fd;
392 	int err;
393 
394 	if ((attributes & ~DOOR_CREATE_MASK) ||
395 	    ((attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
396 	    (DOOR_UNREF | DOOR_UNREF_MULTI)))
397 		return (set_errno(EINVAL));
398 
399 	if ((err = door_create_common(pc_cookie, data_cookie, attributes, 0,
400 	    &fd, NULL)) != 0)
401 		return (set_errno(err));
402 
403 	f_setfd(fd, FD_CLOEXEC);
404 	return (fd);
405 }
406 
407 /*
408  * Common code for creating user and kernel doors.  If a door was
409  * created, stores a file structure pointer in the location pointed
410  * to by fpp (if fpp is non-NULL) and returns 0.  Also, if a non-NULL
411  * pointer to a file descriptor is passed in as fdp, allocates a file
412  * descriptor representing the door.  If a door could not be created,
413  * returns an error.
414  */
415 static int
416 door_create_common(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
417     int from_kernel, int *fdp, file_t **fpp)
418 {
419 	door_node_t	*dp;
420 	vnode_t		*vp;
421 	struct file	*fp;
422 	static door_id_t index = 0;
423 	proc_t		*p = (from_kernel)? &p0 : curproc;
424 
425 	dp = kmem_zalloc(sizeof (door_node_t), KM_SLEEP);
426 
427 	dp->door_vnode = vn_alloc(KM_SLEEP);
428 	dp->door_target = p;
429 	dp->door_data = data_cookie;
430 	dp->door_pc = pc_cookie;
431 	dp->door_flags = attributes;
432 #ifdef _SYSCALL32_IMPL
433 	if (!from_kernel && get_udatamodel() != DATAMODEL_NATIVE)
434 		dp->door_data_max = UINT32_MAX;
435 	else
436 #endif
437 		dp->door_data_max = SIZE_MAX;
438 	dp->door_data_min = 0UL;
439 	dp->door_desc_max = (attributes & DOOR_REFUSE_DESC)? 0 : INT_MAX;
440 
441 	vp = DTOV(dp);
442 	vn_setops(vp, door_vnodeops);
443 	vp->v_type = VDOOR;
444 	vp->v_vfsp = &door_vfs;
445 	vp->v_data = (caddr_t)dp;
446 	mutex_enter(&door_knob);
447 	dp->door_index = index++;
448 	/* add to per-process door list */
449 	door_list_insert(dp);
450 	mutex_exit(&door_knob);
451 
452 	if (falloc(vp, FREAD | FWRITE, &fp, fdp)) {
453 		/*
454 		 * If the file table is full, remove the door from the
		 * per-process list, free the door, and return EMFILE.
456 		 */
457 		mutex_enter(&door_knob);
458 		door_list_delete(dp);
459 		mutex_exit(&door_knob);
460 		vn_free(vp);
461 		kmem_free(dp, sizeof (door_node_t));
462 		return (EMFILE);
463 	}
464 	vn_exists(vp);
465 	if (fdp != NULL)
466 		setf(*fdp, fp);
467 	mutex_exit(&fp->f_tlock);
468 
469 	if (fpp != NULL)
470 		*fpp = fp;
471 	return (0);
472 }
473 
474 static int
475 door_check_limits(door_node_t *dp, door_arg_t *da, int upcall)
476 {
477 	ASSERT(MUTEX_HELD(&door_knob));
478 
479 	/* we allow unref upcalls through, despite any minimum */
480 	if (da->data_size < dp->door_data_min &&
481 	    !(upcall && da->data_ptr == DOOR_UNREF_DATA))
482 		return (ENOBUFS);
483 
484 	if (da->data_size > dp->door_data_max)
485 		return (ENOBUFS);
486 
487 	if (da->desc_num > 0 && (dp->door_flags & DOOR_REFUSE_DESC))
488 		return (ENOTSUP);
489 
490 	if (da->desc_num > dp->door_desc_max)
491 		return (ENFILE);
492 
493 	return (0);
494 }
495 
496 /*
497  * Door invocation.
498  */
499 int
500 door_call(int did, void *args)
501 {
502 	/* Locals */
503 	door_node_t	*dp;
504 	kthread_t	*server_thread;
505 	int		error = 0;
506 	klwp_t		*lwp;
507 	door_client_t	*ct;		/* curthread door_data */
508 	door_server_t	*st;		/* server thread door_data */
509 	door_desc_t	*start = NULL;
510 	uint_t		ncopied = 0;
511 	size_t		dsize;
512 	/* destructor for data returned by a kernel server */
513 	void		(*destfn)() = NULL;
514 	void		*destarg;
515 	model_t		datamodel;
516 	int		gotresults = 0;
517 
518 	lwp = ttolwp(curthread);
519 	datamodel = lwp_getdatamodel(lwp);
520 
521 	ct = door_my_client(1);
522 
523 	/*
524 	 * Get the arguments
525 	 */
526 	if (args) {
527 		if (datamodel == DATAMODEL_NATIVE) {
528 			if (copyin(args, &ct->d_args, sizeof (door_arg_t)) != 0)
529 				return (set_errno(EFAULT));
530 		} else {
531 			door_arg32_t    da32;
532 
533 			if (copyin(args, &da32, sizeof (door_arg32_t)) != 0)
534 				return (set_errno(EFAULT));
535 			ct->d_args.data_ptr =
536 			    (char *)(uintptr_t)da32.data_ptr;
537 			ct->d_args.data_size = da32.data_size;
538 			ct->d_args.desc_ptr =
539 			    (door_desc_t *)(uintptr_t)da32.desc_ptr;
540 			ct->d_args.desc_num = da32.desc_num;
541 			ct->d_args.rbuf =
542 			    (char *)(uintptr_t)da32.rbuf;
543 			ct->d_args.rsize = da32.rsize;
544 		}
545 	} else {
546 		/* No arguments, and no results allowed */
547 		ct->d_noresults = 1;
548 		ct->d_args.data_size = 0;
549 		ct->d_args.desc_num = 0;
550 		ct->d_args.rsize = 0;
551 	}
552 
553 	if ((dp = door_lookup(did, NULL)) == NULL)
554 		return (set_errno(EBADF));
555 
556 	mutex_enter(&door_knob);
557 	if (DOOR_INVALID(dp)) {
558 		mutex_exit(&door_knob);
559 		error = EBADF;
560 		goto out;
561 	}
562 
563 	/*
	 * Before we do anything, check that the arguments do not exceed
	 * the door's configured limits.
566 	 */
567 	error = door_check_limits(dp, &ct->d_args, 0);
568 	if (error != 0) {
569 		mutex_exit(&door_knob);
570 		goto out;
571 	}
572 
573 	/*
574 	 * Check for in-kernel door server.
575 	 */
576 	if (dp->door_target == &p0) {
577 		caddr_t rbuf = ct->d_args.rbuf;
578 		size_t rsize = ct->d_args.rsize;
579 
580 		dp->door_active++;
581 		ct->d_kernel = 1;
582 		ct->d_error = DOOR_WAIT;
583 		mutex_exit(&door_knob);
584 		/* translate file descriptors to vnodes */
585 		if (ct->d_args.desc_num) {
586 			error = door_translate_in();
587 			if (error)
588 				goto out;
589 		}
590 		/*
591 		 * Call kernel door server.  Arguments are passed and
592 		 * returned as a door_arg pointer.  When called, data_ptr
593 		 * points to user data and desc_ptr points to a kernel list
594 		 * of door descriptors that have been converted to file
595 		 * structure pointers.  It's the server function's
596 		 * responsibility to copyin the data pointed to by data_ptr
597 		 * (this avoids extra copying in some cases).  On return,
598 		 * data_ptr points to a user buffer of data, and desc_ptr
599 		 * points to a kernel list of door descriptors representing
600 		 * files.  When a reference is passed to a kernel server,
601 		 * it is the server's responsibility to release the reference
602 		 * (by calling closef).  When the server includes a
603 		 * reference in its reply, it is released as part of the
		 * call (the server must duplicate the reference if
605 		 * it wants to retain a copy).  The destfn, if set to
606 		 * non-NULL, is a destructor to be called when the returned
607 		 * kernel data (if any) is no longer needed (has all been
608 		 * translated and copied to user level).
609 		 */
610 		(*(dp->door_pc))(dp->door_data, &ct->d_args,
611 		    &destfn, &destarg, &error);
612 		mutex_enter(&door_knob);
613 		/* not implemented yet */
614 		if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
615 			door_deliver_unref(dp);
616 		mutex_exit(&door_knob);
617 		if (error)
618 			goto out;
619 
620 		/* translate vnodes to files */
621 		if (ct->d_args.desc_num) {
622 			error = door_translate_out();
623 			if (error)
624 				goto out;
625 		}
626 		ct->d_buf = ct->d_args.rbuf;
627 		ct->d_bufsize = ct->d_args.rsize;
628 		if (rsize < (ct->d_args.data_size +
629 		    (ct->d_args.desc_num * sizeof (door_desc_t)))) {
630 			/* handle overflow */
631 			error = door_overflow(curthread, ct->d_args.data_ptr,
632 			    ct->d_args.data_size, ct->d_args.desc_ptr,
633 			    ct->d_args.desc_num);
634 			if (error)
635 				goto out;
636 			/* door_overflow sets d_args rbuf and rsize */
637 		} else {
638 			ct->d_args.rbuf = rbuf;
639 			ct->d_args.rsize = rsize;
640 		}
641 		goto results;
642 	}
643 
644 	/*
645 	 * Get a server thread from the target domain
646 	 */
647 	if ((server_thread = door_get_server(dp)) == NULL) {
648 		if (DOOR_INVALID(dp))
649 			error = EBADF;
650 		else
651 			error = EAGAIN;
652 		mutex_exit(&door_knob);
653 		goto out;
654 	}
655 
656 	st = DOOR_SERVER(server_thread->t_door);
657 	if (ct->d_args.desc_num || ct->d_args.data_size) {
658 		int is_private = (dp->door_flags & DOOR_PRIVATE);
659 		/*
660 		 * Move data from client to server
661 		 */
662 		DOOR_T_HOLD(st);
663 		mutex_exit(&door_knob);
664 		error = door_args(server_thread, is_private);
665 		mutex_enter(&door_knob);
666 		DOOR_T_RELEASE(st);
667 		if (error) {
668 			/*
669 			 * We're not going to resume this thread after all
670 			 */
671 			door_release_server(dp, server_thread);
672 			shuttle_sleep(server_thread);
673 			mutex_exit(&door_knob);
674 			goto out;
675 		}
676 	}
677 
678 	dp->door_active++;
679 	ct->d_error = DOOR_WAIT;
680 	st->d_caller = curthread;
681 	st->d_active = dp;
682 
683 	shuttle_resume(server_thread, &door_knob);
684 
685 	mutex_enter(&door_knob);
686 shuttle_return:
687 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
688 		/*
689 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
690 		 */
691 		mutex_exit(&door_knob);		/* May block in ISSIG */
692 		if (ISSIG(curthread, FORREAL) ||
693 		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread)) {
694 			/* Signal, forkall, ... */
695 			lwp->lwp_sysabort = 0;
696 			mutex_enter(&door_knob);
697 			error = EINTR;
698 			/*
699 			 * If the server has finished processing our call,
700 			 * or exited (calling door_slam()), then d_error
701 			 * will have changed.  If the server hasn't finished
702 			 * yet, d_error will still be DOOR_WAIT, and we
703 			 * let it know we are not interested in any
704 			 * results by sending a SIGCANCEL, unless the door
705 			 * is marked with DOOR_NO_CANCEL.
706 			 */
707 			if (ct->d_error == DOOR_WAIT &&
708 			    st->d_caller == curthread) {
709 				proc_t	*p = ttoproc(server_thread);
710 
711 				st->d_active = NULL;
712 				st->d_caller = NULL;
713 
714 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
715 					DOOR_T_HOLD(st);
716 					mutex_exit(&door_knob);
717 
718 					mutex_enter(&p->p_lock);
719 					sigtoproc(p, server_thread, SIGCANCEL);
720 					mutex_exit(&p->p_lock);
721 
722 					mutex_enter(&door_knob);
723 					DOOR_T_RELEASE(st);
724 				}
725 			}
726 		} else {
727 			/*
728 			 * Return from stop(), server exit...
729 			 *
730 			 * Note that the server could have done a
731 			 * door_return while the client was in stop state
732 			 * (ISSIG), in which case the error condition
733 			 * is updated by the server.
734 			 */
735 			mutex_enter(&door_knob);
736 			if (ct->d_error == DOOR_WAIT) {
737 				/* Still waiting for a reply */
738 				shuttle_swtch(&door_knob);
739 				mutex_enter(&door_knob);
740 				lwp->lwp_asleep = 0;
741 				goto	shuttle_return;
742 			} else if (ct->d_error == DOOR_EXIT) {
743 				/* Server exit */
744 				error = EINTR;
745 			} else {
746 				/* Server did a door_return during ISSIG */
747 				error = ct->d_error;
748 			}
749 		}
750 		/*
751 		 * Can't exit if the server is currently copying
752 		 * results for me.
753 		 */
754 		while (DOOR_T_HELD(ct))
755 			cv_wait(&ct->d_cv, &door_knob);
756 
757 		/*
758 		 * Find out if results were successfully copied.
759 		 */
760 		if (ct->d_error == 0)
761 			gotresults = 1;
762 	}
763 	lwp->lwp_asleep = 0;		/* /proc */
764 	lwp->lwp_sysabort = 0;		/* /proc */
765 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
766 		door_deliver_unref(dp);
767 	mutex_exit(&door_knob);
768 
769 results:
770 	/*
771 	 * Move the results to userland (if any)
772 	 */
773 
774 	if (ct->d_noresults)
775 		goto out;
776 
777 	if (error) {
778 		/*
779 		 * If server returned results successfully, then we've
780 		 * been interrupted and may need to clean up.
781 		 */
782 		if (gotresults) {
783 			ASSERT(error == EINTR);
784 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
785 		}
786 		goto out;
787 	}
788 
789 	/*
790 	 * Copy back data if we haven't caused an overflow (already
791 	 * handled) and we are using a 2 copy transfer, or we are
792 	 * returning data from a kernel server.
793 	 */
794 	if (ct->d_args.data_size) {
795 		ct->d_args.data_ptr = ct->d_args.rbuf;
796 		if (ct->d_kernel || (!ct->d_overflow &&
797 		    ct->d_args.data_size <= door_max_arg)) {
798 			if (copyout(ct->d_buf, ct->d_args.rbuf,
799 			    ct->d_args.data_size)) {
800 				door_fp_close(ct->d_fpp, ct->d_args.desc_num);
801 				error = EFAULT;
802 				goto out;
803 			}
804 		}
805 	}
806 
807 	/*
808 	 * stuff returned doors into our proc, copyout the descriptors
809 	 */
810 	if (ct->d_args.desc_num) {
811 		struct file	**fpp;
812 		door_desc_t	*didpp;
813 		uint_t		n = ct->d_args.desc_num;
814 
815 		dsize = n * sizeof (door_desc_t);
816 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
817 		fpp = ct->d_fpp;
818 
819 		while (n--) {
820 			if (door_insert(*fpp, didpp) == -1) {
821 				/* Close remaining files */
822 				door_fp_close(fpp, n + 1);
823 				error = EMFILE;
824 				goto out;
825 			}
826 			fpp++; didpp++; ncopied++;
827 		}
828 
829 		ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
830 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
831 
832 		if (copyout(start, ct->d_args.desc_ptr, dsize)) {
833 			error = EFAULT;
834 			goto out;
835 		}
836 	}
837 
838 	/*
839 	 * Return the results
840 	 */
841 	if (datamodel == DATAMODEL_NATIVE) {
842 		if (copyout(&ct->d_args, args, sizeof (door_arg_t)) != 0)
843 			error = EFAULT;
844 	} else {
845 		door_arg32_t    da32;
846 
847 		da32.data_ptr = (caddr32_t)(uintptr_t)ct->d_args.data_ptr;
848 		da32.data_size = ct->d_args.data_size;
849 		da32.desc_ptr = (caddr32_t)(uintptr_t)ct->d_args.desc_ptr;
850 		da32.desc_num = ct->d_args.desc_num;
851 		da32.rbuf = (caddr32_t)(uintptr_t)ct->d_args.rbuf;
852 		da32.rsize = ct->d_args.rsize;
853 		if (copyout(&da32, args, sizeof (door_arg32_t)) != 0) {
854 			error = EFAULT;
855 		}
856 	}
857 
858 out:
859 	ct->d_noresults = 0;
860 
861 	/* clean up the overflow buffer if an error occurred */
862 	if (error != 0 && ct->d_overflow) {
863 		(void) as_unmap(curproc->p_as, ct->d_args.rbuf,
864 		    ct->d_args.rsize);
865 	}
866 	ct->d_overflow = 0;
867 
868 	/* call destructor */
869 	if (destfn) {
870 		ASSERT(ct->d_kernel);
871 		(*destfn)(dp->door_data, destarg);
872 		ct->d_buf = NULL;
873 		ct->d_bufsize = 0;
874 	}
875 
876 	if (dp)
877 		releasef(did);
878 
879 	if (ct->d_buf) {
880 		ASSERT(!ct->d_kernel);
881 		kmem_free(ct->d_buf, ct->d_bufsize);
882 		ct->d_buf = NULL;
883 		ct->d_bufsize = 0;
884 	}
885 	ct->d_kernel = 0;
886 
887 	/* clean up the descriptor copyout buffer */
888 	if (start != NULL) {
889 		if (error != 0)
890 			door_fd_close(start, ncopied);
891 		kmem_free(start, dsize);
892 	}
893 
894 	if (ct->d_fpp) {
895 		kmem_free(ct->d_fpp, ct->d_fpp_size);
896 		ct->d_fpp = NULL;
897 		ct->d_fpp_size = 0;
898 	}
899 
900 	if (error)
901 		return (set_errno(error));
902 
903 	return (0);
904 }
905 
906 static int
907 door_setparam_common(door_node_t *dp, int from_kernel, int type, size_t val)
908 {
909 	int error = 0;
910 
911 	mutex_enter(&door_knob);
912 
913 	if (DOOR_INVALID(dp)) {
914 		mutex_exit(&door_knob);
915 		return (EBADF);
916 	}
917 
918 	/*
919 	 * door_ki_setparam() can only affect kernel doors.
920 	 * door_setparam() can only affect doors attached to the current
921 	 * process.
922 	 */
923 	if ((from_kernel && dp->door_target != &p0) ||
924 	    (!from_kernel && dp->door_target != curproc)) {
925 		mutex_exit(&door_knob);
926 		return (EPERM);
927 	}
928 
929 	switch (type) {
930 	case DOOR_PARAM_DESC_MAX:
931 		if (val > INT_MAX)
932 			error = ERANGE;
933 		else if ((dp->door_flags & DOOR_REFUSE_DESC) && val != 0)
934 			error = ENOTSUP;
935 		else
936 			dp->door_desc_max = (uint_t)val;
937 		break;
938 
939 	case DOOR_PARAM_DATA_MIN:
940 		if (val > dp->door_data_max)
941 			error = EINVAL;
942 		else
943 			dp->door_data_min = val;
944 		break;
945 
946 	case DOOR_PARAM_DATA_MAX:
947 		if (val < dp->door_data_min)
948 			error = EINVAL;
949 		else
950 			dp->door_data_max = val;
951 		break;
952 
953 	default:
954 		error = EINVAL;
955 		break;
956 	}
957 
958 	mutex_exit(&door_knob);
959 	return (error);
960 }
961 
962 static int
963 door_getparam_common(door_node_t *dp, int type, size_t *out)
964 {
965 	int error = 0;
966 
967 	mutex_enter(&door_knob);
968 	switch (type) {
969 	case DOOR_PARAM_DESC_MAX:
970 		*out = (size_t)dp->door_desc_max;
971 		break;
972 	case DOOR_PARAM_DATA_MIN:
973 		*out = dp->door_data_min;
974 		break;
975 	case DOOR_PARAM_DATA_MAX:
976 		*out = dp->door_data_max;
977 		break;
978 	default:
979 		error = EINVAL;
980 		break;
981 	}
982 	mutex_exit(&door_knob);
983 	return (error);
984 }
985 
986 int
987 door_setparam(int did, int type, size_t val)
988 {
989 	door_node_t *dp;
990 	int error = 0;
991 
992 	if ((dp = door_lookup(did, NULL)) == NULL)
993 		return (set_errno(EBADF));
994 
995 	error = door_setparam_common(dp, 0, type, val);
996 
997 	releasef(did);
998 
999 	if (error)
1000 		return (set_errno(error));
1001 
1002 	return (0);
1003 }
1004 
1005 int
1006 door_getparam(int did, int type, size_t *out)
1007 {
1008 	door_node_t *dp;
1009 	size_t val = 0;
1010 	int error = 0;
1011 
1012 	if ((dp = door_lookup(did, NULL)) == NULL)
1013 		return (set_errno(EBADF));
1014 
1015 	error = door_getparam_common(dp, type, &val);
1016 
1017 	releasef(did);
1018 
1019 	if (error)
1020 		return (set_errno(error));
1021 
1022 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1023 		if (copyout(&val, out, sizeof (val)))
1024 			return (set_errno(EFAULT));
1025 #ifdef _SYSCALL32_IMPL
1026 	} else {
1027 		size32_t val32 = (size32_t)val;
1028 
1029 		if (val != val32)
1030 			return (set_errno(EOVERFLOW));
1031 
1032 		if (copyout(&val32, out, sizeof (val32)))
1033 			return (set_errno(EFAULT));
1034 #endif /* _SYSCALL32_IMPL */
1035 	}
1036 
1037 	return (0);
1038 }
1039 
1040 /*
1041  * A copyout() which proceeds from high addresses to low addresses.  This way,
1042  * stack guard pages are effective.
1043  */
1044 static int
1045 door_stack_copyout(const void *kaddr, void *uaddr, size_t count)
1046 {
1047 	const char *kbase = (const char *)kaddr;
1048 	uintptr_t ubase = (uintptr_t)uaddr;
1049 	size_t pgsize = PAGESIZE;
1050 
1051 	if (count <= pgsize)
1052 		return (copyout(kaddr, uaddr, count));
1053 
1054 	while (count > 0) {
1055 		uintptr_t start, end, offset, amount;
1056 
1057 		end = ubase + count;
1058 		start = P2ALIGN(end - 1, pgsize);
1059 		if (P2ALIGN(ubase, pgsize) == start)
1060 			start = ubase;
1061 
1062 		offset = start - ubase;
1063 		amount = end - start;
1064 
1065 		ASSERT(amount > 0 && amount <= count && amount <= pgsize);
1066 
1067 		if (copyout(kbase + offset, (void *)start, amount))
1068 			return (1);
1069 		count -= amount;
1070 	}
1071 	return (0);
1072 }
1073 
1074 /*
1075  * Writes the stack layout for door_return() into the door_server_t of the
1076  * server thread.
1077  */
1078 static int
1079 door_layout(kthread_t *tp, size_t data_size, uint_t ndesc, int info_needed)
1080 {
1081 	door_server_t *st = DOOR_SERVER(tp->t_door);
1082 	door_layout_t *out = &st->d_layout;
1083 	uintptr_t base_sp = (uintptr_t)st->d_sp;
1084 	size_t ssize = st->d_ssize;
1085 	size_t descsz;
1086 	uintptr_t descp, datap, infop, resultsp, finalsp;
1087 	size_t align = STACK_ALIGN;
1088 	size_t results_sz = sizeof (struct door_results);
1089 	model_t datamodel = lwp_getdatamodel(ttolwp(tp));
1090 
1091 	ASSERT(!st->d_layout_done);
1092 
1093 #ifndef _STACK_GROWS_DOWNWARD
1094 #error stack does not grow downward, door_layout() must change
1095 #endif
1096 
1097 #ifdef _SYSCALL32_IMPL
1098 	if (datamodel != DATAMODEL_NATIVE) {
1099 		align = STACK_ALIGN32;
1100 		results_sz = sizeof (struct door_results32);
1101 	}
1102 #endif
1103 
1104 	descsz = ndesc * sizeof (door_desc_t);
1105 
1106 	/*
1107 	 * To speed up the overflow checking, we do an initial check
1108 	 * that the passed in data size won't cause us to wrap past
1109 	 * base_sp.  Since door_max_desc limits descsz, we can
1110 	 * safely use it here.  65535 is an arbitrary 'bigger than
1111 	 * we need, small enough to not cause trouble' constant;
	 * the only constraint is that it must be greater than:
1113 	 *
1114 	 *	5 * STACK_ALIGN +
1115 	 *	    sizeof (door_info_t) +
1116 	 *	    sizeof (door_results_t) +
1117 	 *	    (max adjustment from door_final_sp())
1118 	 *
1119 	 * After we compute the layout, we can safely do a "did we wrap
1120 	 * around" check, followed by a check against the recorded
1121 	 * stack size.
1122 	 */
1123 	if (data_size >= SIZE_MAX - (size_t)65535UL - descsz)
1124 		return (E2BIG);		/* overflow */
1125 
1126 	descp = P2ALIGN(base_sp - descsz, align);
1127 	datap = P2ALIGN(descp - data_size, align);
1128 
1129 	if (info_needed)
1130 		infop = P2ALIGN(datap - sizeof (door_info_t), align);
1131 	else
1132 		infop = datap;
1133 
1134 	resultsp = P2ALIGN(infop - results_sz, align);
1135 	finalsp = door_final_sp(resultsp, align, datamodel);
1136 
1137 	if (finalsp > base_sp)
1138 		return (E2BIG);		/* overflow */
1139 
1140 	if (ssize != 0 && (base_sp - finalsp) > ssize)
1141 		return (E2BIG);		/* doesn't fit in stack */
1142 
1143 	out->dl_descp = (ndesc != 0)? (caddr_t)descp : 0;
1144 	out->dl_datap = (data_size != 0)? (caddr_t)datap : 0;
1145 	out->dl_infop = info_needed? (caddr_t)infop : 0;
1146 	out->dl_resultsp = (caddr_t)resultsp;
1147 	out->dl_sp = (caddr_t)finalsp;
1148 
1149 	st->d_layout_done = 1;
1150 	return (0);
1151 }
1152 
1153 static int
1154 door_server_dispatch(door_client_t *ct, door_node_t *dp)
1155 {
1156 	door_server_t *st = DOOR_SERVER(curthread->t_door);
1157 	door_layout_t *layout = &st->d_layout;
1158 	int error = 0;
1159 
1160 	int is_private = (dp->door_flags & DOOR_PRIVATE);
1161 
1162 	door_pool_t *pool = (is_private)? &dp->door_servers :
1163 	    &curproc->p_server_threads;
1164 
1165 	int empty_pool = (pool->dp_threads == NULL);
1166 
1167 	caddr_t infop = NULL;
1168 	char *datap = NULL;
1169 	size_t datasize = 0;
1170 	size_t descsize;
1171 
1172 	file_t **fpp = ct->d_fpp;
1173 	door_desc_t *start = NULL;
1174 	uint_t ndesc = 0;
1175 	uint_t ncopied = 0;
1176 
1177 	if (ct != NULL) {
1178 		datap = ct->d_args.data_ptr;
1179 		datasize = ct->d_args.data_size;
1180 		ndesc = ct->d_args.desc_num;
1181 	}
1182 
1183 	descsize = ndesc * sizeof (door_desc_t);
1184 
1185 	/*
1186 	 * Reset datap to NULL if we aren't passing any data.  Be careful
1187 	 * to let unref notifications through, though.
1188 	 */
1189 	if (datap == DOOR_UNREF_DATA) {
1190 		if (ct->d_upcall)
1191 			datasize = 0;
1192 		else
1193 			datap = NULL;
1194 	} else if (datasize == 0) {
1195 		datap = NULL;
1196 	}
1197 
1198 	/*
1199 	 * Get the stack layout, if it hasn't already been done.
1200 	 */
1201 	if (!st->d_layout_done) {
1202 		error = door_layout(curthread, datasize, ndesc,
1203 		    (is_private && empty_pool));
1204 		if (error != 0)
1205 			goto fail;
1206 	}
1207 
1208 	/*
1209 	 * fill out the stack, starting from the top.  Layout was already
1210 	 * filled in by door_args() or door_translate_out().
1211 	 */
1212 	if (layout->dl_descp != NULL) {
1213 		ASSERT(ndesc != 0);
1214 		start = kmem_alloc(descsize, KM_SLEEP);
1215 
1216 		while (ndesc > 0) {
1217 			if (door_insert(*fpp, &start[ncopied]) == -1) {
1218 				error = EMFILE;
1219 				goto fail;
1220 			}
1221 			ndesc--;
1222 			ncopied++;
1223 			fpp++;
1224 		}
1225 		if (door_stack_copyout(start, layout->dl_descp, descsize)) {
1226 			error = E2BIG;
1227 			goto fail;
1228 		}
1229 	}
1230 	fpp = NULL;			/* finished processing */
1231 
1232 	if (layout->dl_datap != NULL) {
1233 		ASSERT(datasize != 0);
1234 		datap = layout->dl_datap;
1235 		if (ct->d_upcall || datasize <= door_max_arg) {
1236 			if (door_stack_copyout(ct->d_buf, datap, datasize)) {
1237 				error = E2BIG;
1238 				goto fail;
1239 			}
1240 		}
1241 	}
1242 
1243 	if (is_private && empty_pool) {
1244 		door_info_t di;
1245 
1246 		infop = layout->dl_infop;
1247 		ASSERT(infop != NULL);
1248 
1249 		di.di_target = curproc->p_pid;
1250 		di.di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1251 		di.di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1252 		di.di_uniquifier = dp->door_index;
1253 		di.di_attributes = (dp->door_flags & DOOR_ATTR_MASK) |
1254 		    DOOR_LOCAL;
1255 
1256 		if (copyout(&di, infop, sizeof (di))) {
1257 			error = E2BIG;
1258 			goto fail;
1259 		}
1260 	}
1261 
1262 	if (get_udatamodel() == DATAMODEL_NATIVE) {
1263 		struct door_results dr;
1264 
1265 		dr.cookie = dp->door_data;
1266 		dr.data_ptr = datap;
1267 		dr.data_size = datasize;
1268 		dr.desc_ptr = (door_desc_t *)layout->dl_descp;
1269 		dr.desc_num = ncopied;
1270 		dr.pc = dp->door_pc;
1271 		dr.nservers = !empty_pool;
1272 		dr.door_info = (door_info_t *)infop;
1273 
1274 		if (copyout(&dr, layout->dl_resultsp, sizeof (dr))) {
1275 			error = E2BIG;
1276 			goto fail;
1277 		}
1278 #ifdef _SYSCALL32_IMPL
1279 	} else {
1280 		struct door_results32 dr32;
1281 
1282 		dr32.cookie = (caddr32_t)(uintptr_t)dp->door_data;
1283 		dr32.data_ptr = (caddr32_t)(uintptr_t)datap;
1284 		dr32.data_size = (size32_t)datasize;
1285 		dr32.desc_ptr = (caddr32_t)(uintptr_t)layout->dl_descp;
1286 		dr32.desc_num = ncopied;
1287 		dr32.pc = (caddr32_t)(uintptr_t)dp->door_pc;
1288 		dr32.nservers = !empty_pool;
1289 		dr32.door_info = (caddr32_t)(uintptr_t)infop;
1290 
1291 		if (copyout(&dr32, layout->dl_resultsp, sizeof (dr32))) {
1292 			error = E2BIG;
1293 			goto fail;
1294 		}
1295 #endif
1296 	}
1297 
1298 	error = door_finish_dispatch(layout->dl_sp);
1299 fail:
1300 	if (start != NULL) {
1301 		if (error != 0)
1302 			door_fd_close(start, ncopied);
1303 		kmem_free(start, descsize);
1304 	}
1305 	if (fpp != NULL)
1306 		door_fp_close(fpp, ndesc);
1307 
1308 	return (error);
1309 }
1310 
1311 /*
1312  * Return the results (if any) to the caller (if any) and wait for the
1313  * next invocation on a door.
1314  */
1315 int
1316 door_return(caddr_t data_ptr, size_t data_size,
1317     door_desc_t *desc_ptr, uint_t desc_num, caddr_t sp, size_t ssize)
1318 {
1319 	kthread_t	*caller;
1320 	klwp_t		*lwp;
1321 	int		error = 0;
1322 	door_node_t	*dp;
1323 	door_server_t	*st;		/* curthread door_data */
1324 	door_client_t	*ct;		/* caller door_data */
1325 
1326 	st = door_my_server(1);
1327 
1328 	/*
1329 	 * If thread was bound to a door that no longer exists, return
1330 	 * an error.  This can happen if a thread is bound to a door
1331 	 * before the process calls forkall(); in the child, the door
1332 	 * doesn't exist and door_fork() sets the d_invbound flag.
1333 	 */
1334 	if (st->d_invbound)
1335 		return (set_errno(EINVAL));
1336 
1337 	st->d_sp = sp;			/* Save base of stack. */
1338 	st->d_ssize = ssize;		/* and its size */
1339 
1340 	/*
1341 	 * before we release our stack to the whims of our next caller,
1342 	 * copy in the syscall arguments if we're being traced by /proc.
1343 	 */
1344 	if (curthread->t_post_sys && PTOU(ttoproc(curthread))->u_systrap)
1345 		(void) save_syscall_args();
1346 
1347 	/* Make sure the caller hasn't gone away */
1348 	mutex_enter(&door_knob);
1349 	if ((caller = st->d_caller) == NULL || caller->t_door == NULL) {
1350 		if (desc_num != 0) {
1351 			/* close any DOOR_RELEASE descriptors */
1352 			mutex_exit(&door_knob);
1353 			error = door_release_fds(desc_ptr, desc_num);
1354 			if (error)
1355 				return (set_errno(error));
1356 			mutex_enter(&door_knob);
1357 		}
1358 		goto out;
1359 	}
1360 	ct = DOOR_CLIENT(caller->t_door);
1361 
1362 	ct->d_args.data_size = data_size;
1363 	ct->d_args.desc_num = desc_num;
1364 	/*
1365 	 * Transfer results, if any, to the client
1366 	 */
1367 	if (data_size != 0 || desc_num != 0) {
1368 		/*
1369 		 * Prevent the client from exiting until we have finished
1370 		 * moving results.
1371 		 */
1372 		DOOR_T_HOLD(ct);
1373 		mutex_exit(&door_knob);
1374 		error = door_results(caller, data_ptr, data_size,
1375 		    desc_ptr, desc_num);
1376 		mutex_enter(&door_knob);
1377 		DOOR_T_RELEASE(ct);
1378 		/*
1379 		 * Pass EOVERFLOW errors back to the client
1380 		 */
1381 		if (error && error != EOVERFLOW) {
1382 			mutex_exit(&door_knob);
1383 			return (set_errno(error));
1384 		}
1385 	}
1386 out:
1387 	/* Put ourselves on the available server thread list */
1388 	door_release_server(st->d_pool, curthread);
1389 
1390 	/*
1391 	 * Make sure the caller is still waiting to be resumed
1392 	 */
1393 	if (caller) {
1394 		disp_lock_t *tlp;
1395 
1396 		thread_lock(caller);
1397 		ct->d_error = error;		/* Return any errors */
1398 		if (caller->t_state == TS_SLEEP &&
1399 		    SOBJ_TYPE(caller->t_sobj_ops) == SOBJ_SHUTTLE) {
1400 			cpu_t *cp = CPU;
1401 
1402 			tlp = caller->t_lockp;
1403 			/*
1404 			 * Setting t_disp_queue prevents erroneous preemptions
1405 			 * if this thread is still in execution on another
1406 			 * processor
1407 			 */
1408 			caller->t_disp_queue = cp->cpu_disp;
1409 			CL_ACTIVE(caller);
1410 			/*
1411 			 * We are calling thread_onproc() instead of
1412 			 * THREAD_ONPROC() because compiler can reorder
1413 			 * the two stores of t_state and t_lockp in
1414 			 * THREAD_ONPROC().
1415 			 */
1416 			thread_onproc(caller, cp);
1417 			disp_lock_exit_high(tlp);
1418 			shuttle_resume(caller, &door_knob);
1419 		} else {
1420 			/* May have been setrun or in stop state */
1421 			thread_unlock(caller);
1422 			shuttle_swtch(&door_knob);
1423 		}
1424 	} else {
1425 		shuttle_swtch(&door_knob);
1426 	}
1427 
1428 	/*
1429 	 * We've sprung to life. Determine if we are part of a door
1430 	 * invocation, or just interrupted
1431 	 */
1432 	lwp = ttolwp(curthread);
1433 	mutex_enter(&door_knob);
1434 	if ((dp = st->d_active) != NULL) {
1435 		/*
1436 		 * Normal door invocation. Return any error condition
1437 		 * encountered while trying to pass args to the server
1438 		 * thread.
1439 		 */
1440 		lwp->lwp_asleep = 0;
1441 		/*
1442 		 * Prevent the caller from leaving us while we
		 * are copying out the arguments from its buffer.
1444 		 */
1445 		ASSERT(st->d_caller != NULL);
1446 		ct = DOOR_CLIENT(st->d_caller->t_door);
1447 
1448 		DOOR_T_HOLD(ct);
1449 		mutex_exit(&door_knob);
1450 		error = door_server_dispatch(ct, dp);
1451 		mutex_enter(&door_knob);
1452 		DOOR_T_RELEASE(ct);
1453 
1454 		if (error) {
1455 			caller = st->d_caller;
1456 			if (caller)
1457 				ct = DOOR_CLIENT(caller->t_door);
1458 			else
1459 				ct = NULL;
1460 			goto out;
1461 		}
1462 		mutex_exit(&door_knob);
1463 		return (0);
1464 	} else {
1465 		/*
		 * We are not involved in a door invocation.
1467 		 * Check for /proc related activity...
1468 		 */
1469 		st->d_caller = NULL;
1470 		door_server_exit(curproc, curthread);
1471 		mutex_exit(&door_knob);
1472 		if (ISSIG(curthread, FORREAL) ||
1473 		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread)) {
1474 			lwp->lwp_asleep = 0;
1475 			lwp->lwp_sysabort = 0;
1476 			return (set_errno(EINTR));
1477 		}
1478 		/* Go back and wait for another request */
1479 		lwp->lwp_asleep = 0;
1480 		mutex_enter(&door_knob);
1481 		caller = NULL;
1482 		goto out;
1483 	}
1484 }
1485 
1486 /*
1487  * Revoke any future invocations on this door
1488  */
1489 int
1490 door_revoke(int did)
1491 {
1492 	door_node_t	*d;
1493 	int		error;
1494 
1495 	if ((d = door_lookup(did, NULL)) == NULL)
1496 		return (set_errno(EBADF));
1497 
1498 	mutex_enter(&door_knob);
1499 	if (d->door_target != curproc) {
1500 		mutex_exit(&door_knob);
1501 		releasef(did);
1502 		return (set_errno(EPERM));
1503 	}
1504 	d->door_flags |= DOOR_REVOKED;
1505 	if (d->door_flags & DOOR_PRIVATE)
1506 		cv_broadcast(&d->door_servers.dp_cv);
1507 	else
1508 		cv_broadcast(&curproc->p_server_threads.dp_cv);
1509 	mutex_exit(&door_knob);
1510 	releasef(did);
1511 	/* Invalidate the descriptor */
1512 	if ((error = closeandsetf(did, NULL)) != 0)
1513 		return (set_errno(error));
1514 	return (0);
1515 }
1516 
1517 int
1518 door_info(int did, struct door_info *d_info)
1519 {
1520 	door_node_t	*dp;
1521 	door_info_t	di;
1522 	door_server_t	*st;
1523 	file_t		*fp = NULL;
1524 
1525 	if (did == DOOR_QUERY) {
1526 		/* Get information on door current thread is bound to */
1527 		if ((st = door_my_server(0)) == NULL ||
1528 		    (dp = st->d_pool) == NULL)
1529 			/* Thread isn't bound to a door */
1530 			return (set_errno(EBADF));
1531 	} else if ((dp = door_lookup(did, &fp)) == NULL) {
1532 		/* Not a door */
1533 		return (set_errno(EBADF));
1534 	}
1535 
1536 	door_info_common(dp, &di, fp);
1537 
1538 	if (did != DOOR_QUERY)
1539 		releasef(did);
1540 
1541 	if (copyout(&di, d_info, sizeof (struct door_info)))
1542 		return (set_errno(EFAULT));
1543 	return (0);
1544 }
1545 
1546 /*
1547  * Common code for getting information about a door either via the
1548  * door_info system call or the door_ki_info kernel call.
1549  */
static void
1551 door_info_common(door_node_t *dp, struct door_info *dip, file_t *fp)
1552 {
1553 	int unref_count;
1554 
1555 	bzero(dip, sizeof (door_info_t));
1556 
1557 	mutex_enter(&door_knob);
1558 	if (dp->door_target == NULL)
1559 		dip->di_target = -1;
1560 	else
1561 		dip->di_target = dp->door_target->p_pid;
1562 
1563 	dip->di_attributes = dp->door_flags & DOOR_ATTR_MASK;
1564 	if (dp->door_target == curproc)
1565 		dip->di_attributes |= DOOR_LOCAL;
1566 	dip->di_proc = (door_ptr_t)(uintptr_t)dp->door_pc;
1567 	dip->di_data = (door_ptr_t)(uintptr_t)dp->door_data;
1568 	dip->di_uniquifier = dp->door_index;
1569 	/*
1570 	 * If this door is in the middle of having an unreferenced
1571 	 * notification delivered, don't count the VN_HOLD by
1572 	 * door_deliver_unref in determining if it is unreferenced.
1573 	 * This handles the case where door_info is called from the
1574 	 * thread delivering the unref notification.
1575 	 */
1576 	if (dp->door_flags & DOOR_UNREF_ACTIVE)
1577 		unref_count = 2;
1578 	else
1579 		unref_count = 1;
1580 	mutex_exit(&door_knob);
1581 
1582 	if (fp == NULL) {
1583 		/*
1584 		 * If this thread is bound to the door, then we can just
1585 		 * check the vnode; a ref count of 1 (or 2 if this is
1586 		 * handling an unref notification) means that the hold
1587 		 * from the door_bind is the only reference to the door
1588 		 * (no file descriptor refers to it).
1589 		 */
1590 		if (DTOV(dp)->v_count == unref_count)
1591 			dip->di_attributes |= DOOR_IS_UNREF;
1592 	} else {
1593 		/*
1594 		 * If we're working from a file descriptor or door handle
1595 		 * we need to look at the file structure count.  We don't
1596 		 * need to hold the vnode lock since this is just a snapshot.
1597 		 */
1598 		mutex_enter(&fp->f_tlock);
1599 		if (fp->f_count == 1 && DTOV(dp)->v_count == unref_count)
1600 			dip->di_attributes |= DOOR_IS_UNREF;
1601 		mutex_exit(&fp->f_tlock);
1602 	}
1603 }
1604 
1605 /*
1606  * Return credentials of the door caller (if any) for this invocation
1607  */
1608 int
1609 door_ucred(struct ucred_s *uch)
1610 {
1611 	kthread_t	*caller;
1612 	door_server_t	*st;
1613 	door_client_t	*ct;
1614 	struct proc	*p;
1615 	struct ucred_s	*res;
1616 	int		err;
1617 
1618 	mutex_enter(&door_knob);
1619 	if ((st = door_my_server(0)) == NULL ||
1620 	    (caller = st->d_caller) == NULL) {
1621 		mutex_exit(&door_knob);
1622 		return (set_errno(EINVAL));
1623 	}
1624 
1625 	ASSERT(caller->t_door != NULL);
1626 	ct = DOOR_CLIENT(caller->t_door);
1627 
1628 	/* Prevent caller from exiting while we examine the cred */
1629 	DOOR_T_HOLD(ct);
1630 	mutex_exit(&door_knob);
1631 
1632 	/* Get the credentials of the calling process */
1633 	p = ttoproc(caller);
1634 
1635 	res = pgetucred(p);
1636 
1637 	mutex_enter(&door_knob);
1638 	DOOR_T_RELEASE(ct);
1639 	mutex_exit(&door_knob);
1640 
1641 	err = copyout(res, uch, res->uc_size);
1642 
1643 	kmem_free(res, res->uc_size);
1644 
1645 	if (err != 0)
1646 		return (set_errno(EFAULT));
1647 
1648 	return (0);
1649 }
1650 
1651 /*
1652  * Bind the current lwp to the server thread pool associated with 'did'
1653  */
1654 int
1655 door_bind(int did)
1656 {
1657 	door_node_t	*dp;
1658 	door_server_t	*st;
1659 
1660 	if ((dp = door_lookup(did, NULL)) == NULL) {
1661 		/* Not a door */
1662 		return (set_errno(EBADF));
1663 	}
1664 
1665 	/*
1666 	 * Can't bind to a non-private door, and can't bind to a door
1667 	 * served by another process.
1668 	 */
1669 	if ((dp->door_flags & DOOR_PRIVATE) == 0 ||
1670 	    dp->door_target != curproc) {
1671 		releasef(did);
1672 		return (set_errno(EINVAL));
1673 	}
1674 
1675 	st = door_my_server(1);
1676 	if (st->d_pool)
1677 		door_unbind_thread(st->d_pool);
1678 	st->d_pool = dp;
1679 	st->d_invbound = 0;
1680 	door_bind_thread(dp);
1681 	releasef(did);
1682 
1683 	return (0);
1684 }
1685 
1686 /*
 * Unbind the current lwp from its server thread pool
1688  */
1689 int
1690 door_unbind(void)
1691 {
1692 	door_server_t *st;
1693 
1694 	if ((st = door_my_server(0)) == NULL)
1695 		return (set_errno(EBADF));
1696 
1697 	if (st->d_invbound) {
1698 		ASSERT(st->d_pool == NULL);
1699 		st->d_invbound = 0;
1700 		return (0);
1701 	}
1702 	if (st->d_pool == NULL)
1703 		return (set_errno(EBADF));
1704 	door_unbind_thread(st->d_pool);
1705 	st->d_pool = NULL;
1706 	return (0);
1707 }
1708 
1709 /*
1710  * Create a descriptor for the associated file and fill in the
1711  * attributes associated with it.
1712  *
 * Return 0 for success, -1 otherwise.
1714  */
1715 int
1716 door_insert(struct file *fp, door_desc_t *dp)
1717 {
1718 	struct vnode *vp;
1719 	int	fd;
1720 	door_attr_t attributes = DOOR_DESCRIPTOR;
1721 
1722 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1723 	if ((fd = ufalloc(0)) == -1)
1724 		return (-1);
1725 	setf(fd, fp);
1726 	dp->d_data.d_desc.d_descriptor = fd;
1727 
1728 	/* Fill in the attributes */
1729 	if (VOP_REALVP(fp->f_vnode, &vp))
1730 		vp = fp->f_vnode;
1731 	if (vp && vp->v_type == VDOOR) {
1732 		if (VTOD(vp)->door_target == curproc)
1733 			attributes |= DOOR_LOCAL;
1734 		attributes |= VTOD(vp)->door_flags & DOOR_ATTR_MASK;
1735 		dp->d_data.d_desc.d_id = VTOD(vp)->door_index;
1736 	}
1737 	dp->d_attributes = attributes;
1738 	return (0);
1739 }
1740 
1741 /*
1742  * Return an available thread for this server.  A NULL return value indicates
1743  * that either:
1744  *	The door has been revoked, or
1745  *	a signal was received.
1746  * The two conditions can be differentiated using DOOR_INVALID(dp).
1747  */
1748 static kthread_t *
1749 door_get_server(door_node_t *dp)
1750 {
1751 	kthread_t **ktp;
1752 	kthread_t *server_t;
1753 	door_pool_t *pool;
1754 	door_server_t *st;
1755 	int signalled;
1756 
1757 	disp_lock_t *tlp;
1758 	cpu_t *cp;
1759 
1760 	ASSERT(MUTEX_HELD(&door_knob));
1761 
1762 	if (dp->door_flags & DOOR_PRIVATE)
1763 		pool = &dp->door_servers;
1764 	else
1765 		pool = &dp->door_target->p_server_threads;
1766 
1767 	for (;;) {
1768 		/*
1769 		 * We search the thread pool, looking for a server thread
1770 		 * ready to take an invocation (i.e. one which is still
1771 		 * sleeping on a shuttle object).  If none are available,
1772 		 * we sleep on the pool's CV, and will be signaled when a
1773 		 * thread is added to the pool.
1774 		 *
1775 		 * This relies on the fact that once a thread in the thread
1776 		 * pool wakes up, it *must* remove and add itself to the pool
1777 		 * before it can receive door calls.
1778 		 */
1779 		if (DOOR_INVALID(dp))
1780 			return (NULL);	/* Target has become invalid */
1781 
1782 		for (ktp = &pool->dp_threads;
1783 		    (server_t = *ktp) != NULL;
1784 		    ktp = &st->d_servers) {
1785 			st = DOOR_SERVER(server_t->t_door);
1786 
1787 			thread_lock(server_t);
1788 			if (server_t->t_state == TS_SLEEP &&
1789 			    SOBJ_TYPE(server_t->t_sobj_ops) == SOBJ_SHUTTLE)
1790 				break;
1791 			thread_unlock(server_t);
1792 		}
1793 		if (server_t != NULL)
1794 			break;		/* we've got a live one! */
1795 
1796 		if (!cv_wait_sig_swap_core(&pool->dp_cv, &door_knob,
1797 		    &signalled)) {
1798 			/*
1799 			 * If we were signalled and the door is still
1800 			 * valid, pass the signal on to another waiter.
1801 			 */
1802 			if (signalled && !DOOR_INVALID(dp))
1803 				cv_signal(&pool->dp_cv);
1804 			return (NULL);	/* Got a signal */
1805 		}
1806 	}
1807 
1808 	/*
1809 	 * We've got a thread_lock()ed thread which is still on the
1810 	 * shuttle.  Take it off the list of available server threads
1811 	 * and mark it as ONPROC.  We are committed to resuming this
1812 	 * thread now.
1813 	 */
1814 	tlp = server_t->t_lockp;
1815 	cp = CPU;
1816 
1817 	*ktp = st->d_servers;
1818 	st->d_servers = NULL;
1819 	/*
1820 	 * Setting t_disp_queue prevents erroneous preemptions
1821 	 * if this thread is still in execution on another processor
1822 	 */
1823 	server_t->t_disp_queue = cp->cpu_disp;
1824 	CL_ACTIVE(server_t);
1825 	/*
1826 	 * We are calling thread_onproc() instead of
1827 	 * THREAD_ONPROC() because compiler can reorder
1828 	 * the two stores of t_state and t_lockp in
1829 	 * THREAD_ONPROC().
1830 	 */
1831 	thread_onproc(server_t, cp);
1832 	disp_lock_exit(tlp);
1833 	return (server_t);
1834 }
1835 
1836 /*
1837  * Put a server thread back in the pool.
1838  */
1839 static void
1840 door_release_server(door_node_t *dp, kthread_t *t)
1841 {
1842 	door_server_t *st = DOOR_SERVER(t->t_door);
1843 	door_pool_t *pool;
1844 
1845 	ASSERT(MUTEX_HELD(&door_knob));
1846 	st->d_active = NULL;
1847 	st->d_caller = NULL;
1848 	st->d_layout_done = 0;
1849 	if (dp && (dp->door_flags & DOOR_PRIVATE)) {
1850 		ASSERT(dp->door_target == NULL ||
1851 		    dp->door_target == ttoproc(t));
1852 		pool = &dp->door_servers;
1853 	} else {
1854 		pool = &ttoproc(t)->p_server_threads;
1855 	}
1856 
1857 	st->d_servers = pool->dp_threads;
1858 	pool->dp_threads = t;
1859 
1860 	/* If someone is waiting for a server thread, wake him up */
1861 	cv_signal(&pool->dp_cv);
1862 }
1863 
1864 /*
1865  * Remove a server thread from the pool if present.
1866  */
1867 static void
1868 door_server_exit(proc_t *p, kthread_t *t)
1869 {
1870 	door_pool_t *pool;
1871 	kthread_t **next;
1872 	door_server_t *st = DOOR_SERVER(t->t_door);
1873 
1874 	ASSERT(MUTEX_HELD(&door_knob));
1875 	if (st->d_pool != NULL) {
1876 		ASSERT(st->d_pool->door_flags & DOOR_PRIVATE);
1877 		pool = &st->d_pool->door_servers;
1878 	} else {
1879 		pool = &p->p_server_threads;
1880 	}
1881 
1882 	next = &pool->dp_threads;
1883 	while (*next != NULL) {
1884 		if (*next == t) {
1885 			*next = DOOR_SERVER(t->t_door)->d_servers;
1886 			return;
1887 		}
1888 		next = &(DOOR_SERVER((*next)->t_door)->d_servers);
1889 	}
1890 }
1891 
1892 /*
 * Look up the door descriptor.  The caller must call releasef() when
 * finished with the associated door.
1895  */
1896 static door_node_t *
1897 door_lookup(int did, file_t **fpp)
1898 {
1899 	vnode_t	*vp;
1900 	file_t *fp;
1901 
1902 	ASSERT(MUTEX_NOT_HELD(&door_knob));
1903 	if ((fp = getf(did)) == NULL)
1904 		return (NULL);
1905 	/*
1906 	 * Use the underlying vnode (we may be namefs mounted)
1907 	 */
1908 	if (VOP_REALVP(fp->f_vnode, &vp))
1909 		vp = fp->f_vnode;
1910 
1911 	if (vp == NULL || vp->v_type != VDOOR) {
1912 		releasef(did);
1913 		return (NULL);
1914 	}
1915 
1916 	if (fpp)
1917 		*fpp = fp;
1918 
1919 	return (VTOD(vp));
1920 }
1921 
1922 /*
1923  * The current thread is exiting, so clean up any pending
1924  * invocation details
1925  */
1926 void
1927 door_slam(void)
1928 {
1929 	door_node_t *dp;
1930 	door_data_t *dt;
1931 	door_client_t *ct;
1932 	door_server_t *st;
1933 
1934 	/*
1935 	 * If we are an active door server, notify our
1936 	 * client that we are exiting and revoke our door.
1937 	 */
1938 	if ((dt = door_my_data(0)) == NULL)
1939 		return;
1940 	ct = DOOR_CLIENT(dt);
1941 	st = DOOR_SERVER(dt);
1942 
1943 	mutex_enter(&door_knob);
1944 	for (;;) {
1945 		if (DOOR_T_HELD(ct))
1946 			cv_wait(&ct->d_cv, &door_knob);
1947 		else if (DOOR_T_HELD(st))
1948 			cv_wait(&st->d_cv, &door_knob);
1949 		else
1950 			break;			/* neither flag is set */
1951 	}
1952 	curthread->t_door = NULL;
1953 	if ((dp = st->d_active) != NULL) {
1954 		kthread_t *t = st->d_caller;
1955 		proc_t *p = curproc;
1956 
1957 		/* Revoke our door if the process is exiting */
1958 		if (dp->door_target == p && (p->p_flag & SEXITING)) {
1959 			door_list_delete(dp);
1960 			dp->door_target = NULL;
1961 			dp->door_flags |= DOOR_REVOKED;
1962 			if (dp->door_flags & DOOR_PRIVATE)
1963 				cv_broadcast(&dp->door_servers.dp_cv);
1964 			else
1965 				cv_broadcast(&p->p_server_threads.dp_cv);
1966 		}
1967 
1968 		if (t != NULL) {
1969 			/*
1970 			 * Let the caller know we are gone
1971 			 */
1972 			DOOR_CLIENT(t->t_door)->d_error = DOOR_EXIT;
1973 			thread_lock(t);
1974 			if (t->t_state == TS_SLEEP &&
1975 			    SOBJ_TYPE(t->t_sobj_ops) == SOBJ_SHUTTLE)
1976 				setrun_locked(t);
1977 			thread_unlock(t);
1978 		}
1979 	}
1980 	mutex_exit(&door_knob);
1981 	if (st->d_pool)
1982 		door_unbind_thread(st->d_pool);	/* Implicit door_unbind */
1983 	kmem_free(dt, sizeof (door_data_t));
1984 }
1985 
1986 /*
1987  * Set DOOR_REVOKED for all doors of the current process. This is called
1988  * on exit, before the lwps are terminated, so that door calls will
1989  * return with an error.
1990  */
1991 void
1992 door_revoke_all()
1993 {
1994 	door_node_t *dp;
1995 	proc_t *p = ttoproc(curthread);
1996 
1997 	mutex_enter(&door_knob);
1998 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
1999 		ASSERT(dp->door_target == p);
2000 		dp->door_flags |= DOOR_REVOKED;
2001 		if (dp->door_flags & DOOR_PRIVATE)
2002 			cv_broadcast(&dp->door_servers.dp_cv);
2003 	}
2004 	cv_broadcast(&p->p_server_threads.dp_cv);
2005 	mutex_exit(&door_knob);
2006 }
2007 
2008 /*
2009  * The process is exiting, and all doors it created need to be revoked.
2010  */
2011 void
2012 door_exit(void)
2013 {
2014 	door_node_t *dp;
2015 	proc_t *p = ttoproc(curthread);
2016 
2017 	ASSERT(p->p_lwpcnt == 1);
2018 	/*
2019 	 * Walk the list of active doors created by this process and
2020 	 * revoke them all.
2021 	 */
2022 	mutex_enter(&door_knob);
2023 	for (dp = p->p_door_list; dp != NULL; dp = dp->door_list) {
2024 		dp->door_target = NULL;
2025 		dp->door_flags |= DOOR_REVOKED;
2026 		if (dp->door_flags & DOOR_PRIVATE)
2027 			cv_broadcast(&dp->door_servers.dp_cv);
2028 	}
2029 	cv_broadcast(&p->p_server_threads.dp_cv);
2030 	/* Clear the list */
2031 	p->p_door_list = NULL;
2032 
2033 	/* Clean up the unref list */
2034 	while ((dp = p->p_unref_list) != NULL) {
2035 		p->p_unref_list = dp->door_ulist;
2036 		dp->door_ulist = NULL;
2037 		mutex_exit(&door_knob);
2038 		VN_RELE(DTOV(dp));
2039 		mutex_enter(&door_knob);
2040 	}
2041 	mutex_exit(&door_knob);
2042 }
2043 
2044 
2045 /*
2046  * The process is executing forkall(), and we need to flag threads that
2047  * are bound to a door in the child.  This makes door_return fail in
2048  * the child threads unless they call door_unbind first.
2049  */
2050 void
2051 door_fork(kthread_t *parent, kthread_t *child)
2052 {
2053 	door_data_t *pt = parent->t_door;
2054 	door_server_t *st = DOOR_SERVER(pt);
2055 	door_data_t *dt;
2056 
2057 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2058 	if (pt != NULL && (st->d_pool != NULL || st->d_invbound)) {
2059 		/* parent thread is bound to a door */
2060 		dt = child->t_door =
2061 		    kmem_zalloc(sizeof (door_data_t), KM_SLEEP);
2062 		DOOR_SERVER(dt)->d_invbound = 1;
2063 	}
2064 }
2065 
2066 /*
2067  * Deliver queued unrefs to appropriate door server.
2068  */
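/*
 * For illustration, the user-level side that this thread ultimately feeds
 * (a sketch, not part of this file): a process that created its door with
 * the DOOR_UNREF attribute receives an invocation whose argument pointer
 * is the special value DOOR_UNREF_DATA when the last reference goes away:
 *
 *	void
 *	server_proc(void *cookie, char *argp, size_t arg_size,
 *	    door_desc_t *dp, uint_t n_desc)
 *	{
 *		if (argp == DOOR_UNREF_DATA) {
 *			... last reference is gone; clean up ...
 *		}
 *		(void) door_return(NULL, 0, NULL, 0);
 *	}
 */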
2069 static int
2070 door_unref(void)
2071 {
2072 	door_node_t	*dp;
2073 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2074 	proc_t *p = ttoproc(curthread);
2075 
2076 	/* make sure there's only one unref thread per process */
2077 	mutex_enter(&door_knob);
2078 	if (p->p_unref_thread) {
2079 		mutex_exit(&door_knob);
2080 		return (set_errno(EALREADY));
2081 	}
2082 	p->p_unref_thread = 1;
2083 	mutex_exit(&door_knob);
2084 
2085 	(void) door_my_data(1);			/* create info, if necessary */
2086 
2087 	for (;;) {
2088 		mutex_enter(&door_knob);
2089 
2090 		/* Grab a queued request */
2091 		while ((dp = p->p_unref_list) == NULL) {
2092 			if (!cv_wait_sig(&p->p_unref_cv, &door_knob)) {
2093 				/*
2094 				 * Interrupted.
2095 				 * Return so we can finish forkall() or exit().
2096 				 */
2097 				p->p_unref_thread = 0;
2098 				mutex_exit(&door_knob);
2099 				return (set_errno(EINTR));
2100 			}
2101 		}
2102 		p->p_unref_list = dp->door_ulist;
2103 		dp->door_ulist = NULL;
2104 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2105 		mutex_exit(&door_knob);
2106 
2107 		(void) door_upcall(DTOV(dp), &unref_args);
2108 
2109 		mutex_enter(&door_knob);
2110 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2111 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2112 		mutex_exit(&door_knob);
2113 		VN_RELE(DTOV(dp));
2114 	}
2115 }
2116 
2117 
2118 /*
2119  * Deliver queued unrefs to kernel door server.
2120  */
2121 /* ARGSUSED */
2122 static void
2123 door_unref_kernel(caddr_t arg)
2124 {
2125 	door_node_t	*dp;
2126 	static door_arg_t unref_args = { DOOR_UNREF_DATA, 0, 0, 0, 0, 0 };
2127 	proc_t *p = ttoproc(curthread);
2128 	callb_cpr_t cprinfo;
2129 
2130 	/* should only be one of these */
2131 	mutex_enter(&door_knob);
2132 	if (p->p_unref_thread) {
2133 		mutex_exit(&door_knob);
2134 		return;
2135 	}
2136 	p->p_unref_thread = 1;
2137 	mutex_exit(&door_knob);
2138 
2139 	(void) door_my_data(1);		/* make sure we have a door_data_t */
2140 
2141 	CALLB_CPR_INIT(&cprinfo, &door_knob, callb_generic_cpr, "door_unref");
2142 	for (;;) {
2143 		mutex_enter(&door_knob);
2144 		/* Grab a queued request */
2145 		while ((dp = p->p_unref_list) == NULL) {
2146 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
2147 			cv_wait(&p->p_unref_cv, &door_knob);
2148 			CALLB_CPR_SAFE_END(&cprinfo, &door_knob);
2149 		}
2150 		p->p_unref_list = dp->door_ulist;
2151 		dp->door_ulist = NULL;
2152 		dp->door_flags |= DOOR_UNREF_ACTIVE;
2153 		mutex_exit(&door_knob);
2154 
2155 		(*(dp->door_pc))(dp->door_data, &unref_args, NULL, NULL, NULL);
2156 
2157 		mutex_enter(&door_knob);
2158 		ASSERT(dp->door_flags & DOOR_UNREF_ACTIVE);
2159 		dp->door_flags &= ~DOOR_UNREF_ACTIVE;
2160 		mutex_exit(&door_knob);
2161 		VN_RELE(DTOV(dp));
2162 	}
2163 }
2164 
2165 
2166  * Queue an unref invocation for processing for the current process.
2167  * Queue an unref invocation for processing for the current process
2168  * The door may or may not be revoked at this point.
2169  */
2170 void
2171 door_deliver_unref(door_node_t *d)
2172 {
2173 	struct proc *server = d->door_target;
2174 
2175 	ASSERT(MUTEX_HELD(&door_knob));
2176 	ASSERT(d->door_active == 0);
2177 
2178 	if (server == NULL)
2179 		return;
2180 	/*
2181 	 * Create a lwp to deliver unref calls if one isn't already running.
2182 	 *
2183 	 * A separate thread is used to deliver unrefs since the current
2184 	 * thread may be holding resources (e.g. locks) in user land that
2185 	 * may be needed by the unref processing. This would cause a
2186 	 * deadlock.
2187 	 */
2188 	if (d->door_flags & DOOR_UNREF_MULTI) {
2189 		/* multiple unrefs */
2190 		d->door_flags &= ~DOOR_DELAY;
2191 	} else {
2192 		/* Only 1 unref per door */
2193 		d->door_flags &= ~(DOOR_UNREF|DOOR_DELAY);
2194 	}
2195 	mutex_exit(&door_knob);
2196 
2197 	/*
2198 	 * Need to bump the vnode count before putting the door on the
2199 	 * list so it doesn't get prematurely released by door_unref.
2200 	 */
2201 	VN_HOLD(DTOV(d));
2202 
2203 	mutex_enter(&door_knob);
2204 	/* is this door already on the unref list? */
2205 	if (d->door_flags & DOOR_UNREF_MULTI) {
2206 		door_node_t *dp;
2207 		for (dp = server->p_unref_list; dp != NULL;
2208 		    dp = dp->door_ulist) {
2209 			if (d == dp) {
2210 				/* already there, don't need to add another */
2211 				mutex_exit(&door_knob);
2212 				VN_RELE(DTOV(d));
2213 				mutex_enter(&door_knob);
2214 				return;
2215 			}
2216 		}
2217 	}
2218 	ASSERT(d->door_ulist == NULL);
2219 	d->door_ulist = server->p_unref_list;
2220 	server->p_unref_list = d;
2221 	cv_broadcast(&server->p_unref_cv);
2222 }
2223 
2224  * The caller's buffer isn't big enough for all of the data/fd's.  Allocate
2225  * space in the caller's address space for the results and copy the data
2226  * space in the callers address space for the results and copy the data
2227  * there.
2228  *
2229  * For EOVERFLOW, we must clean up the server's door descriptors.
2230  */
2231 static int
2232 door_overflow(
2233 	kthread_t	*caller,
2234 	caddr_t		data_ptr,	/* data location */
2235 	size_t		data_size,	/* data size */
2236 	door_desc_t	*desc_ptr,	/* descriptor location */
2237 	uint_t		desc_num)	/* number of descriptors */
2238 {
2239 	proc_t *callerp = ttoproc(caller);
2240 	struct as *as = callerp->p_as;
2241 	door_client_t *ct = DOOR_CLIENT(caller->t_door);
2242 	caddr_t	addr;			/* Resulting address in target */
2243 	size_t	rlen;			/* Rounded len */
2244 	size_t	len;
2245 	uint_t	i;
2246 	size_t	ds = desc_num * sizeof (door_desc_t);
2247 
2248 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2249 	ASSERT(DOOR_T_HELD(ct) || ct->d_kernel);
2250 
2251 	/* Do initial overflow check */
2252 	if (!ufcanalloc(callerp, desc_num))
2253 		return (EMFILE);
2254 
2255 	/*
2256 	 * Allocate space for the results in the caller's address space
2257 	 */
2258 	rlen = roundup(data_size + ds, PAGESIZE);
2259 	as_rangelock(as);
2260 	map_addr_proc(&addr, rlen, 0, 1, as->a_userlimit, ttoproc(caller), 0);
2261 	if (addr == NULL ||
2262 	    as_map(as, addr, rlen, segvn_create, zfod_argsp) != 0) {
2263 		/* No virtual memory available, or anon mapping failed */
2264 		as_rangeunlock(as);
2265 		if (!ct->d_kernel && desc_num > 0) {
2266 			int error = door_release_fds(desc_ptr, desc_num);
2267 			if (error)
2268 				return (error);
2269 		}
2270 		return (EOVERFLOW);
2271 	}
2272 	as_rangeunlock(as);
2273 
2274 	if (ct->d_kernel)
2275 		goto out;
2276 
2277 	if (data_size != 0) {
2278 		caddr_t	src = data_ptr;
2279 		caddr_t saddr = addr;
2280 
2281 		/* Copy any data */
2282 		len = data_size;
2283 		while (len != 0) {
2284 			int	amount;
2285 			int	error;
2286 
2287 			amount = len > PAGESIZE ? PAGESIZE : len;
2288 			if ((error = door_copy(as, src, saddr, amount)) != 0) {
2289 				(void) as_unmap(as, addr, rlen);
2290 				return (error);
2291 			}
2292 			saddr += amount;
2293 			src += amount;
2294 			len -= amount;
2295 		}
2296 	}
2297 	/* Copy any fd's */
2298 	if (desc_num != 0) {
2299 		door_desc_t	*didpp, *start;
2300 		struct file	**fpp;
2301 		int		fpp_size;
2302 
2303 		start = didpp = kmem_alloc(ds, KM_SLEEP);
2304 		if (copyin(desc_ptr, didpp, ds)) {
2305 			kmem_free(start, ds);
2306 			(void) as_unmap(as, addr, rlen);
2307 			return (EFAULT);
2308 		}
2309 
2310 		fpp_size = desc_num * sizeof (struct file *);
2311 		if (fpp_size > ct->d_fpp_size) {
2312 			/* make more space */
2313 			if (ct->d_fpp_size)
2314 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2315 			ct->d_fpp_size = fpp_size;
2316 			ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2317 		}
2318 		fpp = ct->d_fpp;
2319 
2320 		for (i = 0; i < desc_num; i++) {
2321 			struct file *fp;
2322 			int fd = didpp->d_data.d_desc.d_descriptor;
2323 
2324 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2325 			    (fp = getf(fd)) == NULL) {
2326 				/* close translated references */
2327 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2328 				/* close untranslated references */
2329 				door_fd_rele(didpp, desc_num - i, 0);
2330 				kmem_free(start, ds);
2331 				(void) as_unmap(as, addr, rlen);
2332 				return (EINVAL);
2333 			}
2334 			mutex_enter(&fp->f_tlock);
2335 			fp->f_count++;
2336 			mutex_exit(&fp->f_tlock);
2337 
2338 			*fpp = fp;
2339 			releasef(fd);
2340 
2341 			if (didpp->d_attributes & DOOR_RELEASE) {
2342 				/* release passed reference */
2343 				(void) closeandsetf(fd, NULL);
2344 			}
2345 
2346 			fpp++; didpp++;
2347 		}
2348 		kmem_free(start, ds);
2349 	}
2350 
2351 out:
2352 	ct->d_overflow = 1;
2353 	ct->d_args.rbuf = addr;
2354 	ct->d_args.rsize = rlen;
2355 	return (0);
2356 }
2357 
2358 /*
2359  * Transfer arguments from the client to the server.
2360  */
2361 static int
2362 door_args(kthread_t *server, int is_private)
2363 {
2364 	door_server_t *st = DOOR_SERVER(server->t_door);
2365 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2366 	uint_t	ndid;
2367 	size_t	dsize;
2368 	int	error;
2369 
2370 	ASSERT(DOOR_T_HELD(st));
2371 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2372 
2373 	ndid = ct->d_args.desc_num;
2374 	if (ndid > door_max_desc)
2375 		return (E2BIG);
2376 
2377 	/*
2378 	 * Get the stack layout, and fail now if it won't fit.
2379 	 */
2380 	error = door_layout(server, ct->d_args.data_size, ndid, is_private);
2381 	if (error != 0)
2382 		return (error);
2383 
2384 	dsize = ndid * sizeof (door_desc_t);
2385 	if (ct->d_args.data_size != 0) {
2386 		if (ct->d_args.data_size <= door_max_arg) {
2387 			/*
2388 			 * Use a 2 copy method for small amounts of data
2389 			 *
2390 			 * Allocate a little more than we need for the
2391 			 * args, in the hope that the results will fit
2392 			 * without having to reallocate a buffer
2393 			 */
2394 			ASSERT(ct->d_buf == NULL);
2395 			ct->d_bufsize = roundup(ct->d_args.data_size,
2396 			    DOOR_ROUND);
2397 			ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2398 			if (copyin(ct->d_args.data_ptr,
2399 			    ct->d_buf, ct->d_args.data_size) != 0) {
2400 				kmem_free(ct->d_buf, ct->d_bufsize);
2401 				ct->d_buf = NULL;
2402 				ct->d_bufsize = 0;
2403 				return (EFAULT);
2404 			}
2405 		} else {
2406 			struct as	*as;
2407 			caddr_t		src;
2408 			caddr_t		dest;
2409 			size_t		len = ct->d_args.data_size;
2410 			uintptr_t	base;
2411 
2412 			/*
2413 			 * Use a 1 copy method
2414 			 */
2415 			as = ttoproc(server)->p_as;
2416 			src = ct->d_args.data_ptr;
2417 
2418 			dest = st->d_layout.dl_datap;
2419 			base = (uintptr_t)dest;
2420 
2421 			/*
2422 			 * Copy data directly into server.  We proceed
2423 			 * downward from the top of the stack, to mimic
2424 			 * normal stack usage. This allows the guard page
2425 			 * to stop us before we corrupt anything.
2426 			 */
2427 			while (len != 0) {
2428 				uintptr_t start;
2429 				uintptr_t end;
2430 				uintptr_t offset;
2431 				size_t	amount;
2432 
2433 				/*
2434 				 * Locate the next part to copy.
2435 				 */
2436 				end = base + len;
2437 				start = P2ALIGN(end - 1, PAGESIZE);
2438 
2439 				/*
2440 				 * if we are on the final (first) page, fix
2441 				 * up the start position.
2442 				 */
2443 				if (P2ALIGN(base, PAGESIZE) == start)
2444 					start = base;
2445 
2446 				offset = start - base;	/* the copy offset */
2447 				amount = end - start;	/* # bytes to copy */
2448 
2449 				ASSERT(amount > 0 && amount <= len &&
2450 				    amount <= PAGESIZE);
2451 
2452 				error = door_copy(as, src + offset,
2453 				    dest + offset, amount);
2454 				if (error != 0)
2455 					return (error);
2456 				len -= amount;
2457 			}
2458 		}
2459 	}
2460 	/*
2461 	 * Copyin the door args and translate them into files
2462 	 */
2463 	if (ndid != 0) {
2464 		door_desc_t	*didpp;
2465 		door_desc_t	*start;
2466 		struct file	**fpp;
2467 
2468 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2469 
2470 		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
2471 			kmem_free(start, dsize);
2472 			return (EFAULT);
2473 		}
2474 		ct->d_fpp_size = ndid * sizeof (struct file *);
2475 		ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2476 		fpp = ct->d_fpp;
2477 		while (ndid--) {
2478 			struct file *fp;
2479 			int fd = didpp->d_data.d_desc.d_descriptor;
2480 
2481 			/* We only understand file descriptors as passed objs */
2482 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2483 			    (fp = getf(fd)) == NULL) {
2484 				/* close translated references */
2485 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2486 				/* close untranslated references */
2487 				door_fd_rele(didpp, ndid + 1, 0);
2488 				kmem_free(start, dsize);
2489 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2490 				ct->d_fpp = NULL;
2491 				ct->d_fpp_size = 0;
2492 				return (EINVAL);
2493 			}
2494 			/* Hold the fp */
2495 			mutex_enter(&fp->f_tlock);
2496 			fp->f_count++;
2497 			mutex_exit(&fp->f_tlock);
2498 
2499 			*fpp = fp;
2500 			releasef(fd);
2501 
2502 			if (didpp->d_attributes & DOOR_RELEASE) {
2503 				/* release passed reference */
2504 				(void) closeandsetf(fd, NULL);
2505 			}
2506 
2507 			fpp++; didpp++;
2508 		}
2509 		kmem_free(start, dsize);
2510 	}
2511 	return (0);
2512 }
2513 
2514 /*
2515  * Transfer arguments from a user client to a kernel server.  This copies in
2516  * descriptors and translates them into door handles.  It doesn't touch the
2517  * other data, letting the kernel server deal with that (to avoid needing
2518  * to copy the data twice).
2519  */
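/*
 * Illustrative sketch (not from this file) of what a kernel server sees
 * after this translation: each valid descriptor arrives as a door_desc_t
 * with DOOR_HANDLE set in d_attributes.  A hold is taken on every handle
 * below, so a kernel consumer that is finished with a handle would
 * typically drop it with door_ki_rele():
 *
 *	if (dp->d_attributes & DOOR_HANDLE) {
 *		door_handle_t dh = dp->d_data.d_handle;
 *
 *		... use dh, e.g. door_ki_info(dh, &di) ...
 *		door_ki_rele(dh);
 *	}
 */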
2520 static int
2521 door_translate_in(void)
2522 {
2523 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2524 	uint_t	ndid;
2525 
2526 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2527 	ndid = ct->d_args.desc_num;
2528 	if (ndid > door_max_desc)
2529 		return (E2BIG);
2530 	/*
2531 	 * Copyin the door args and translate them into door handles.
2532 	 */
2533 	if (ndid != 0) {
2534 		door_desc_t	*didpp;
2535 		door_desc_t	*start;
2536 		size_t		dsize = ndid * sizeof (door_desc_t);
2537 		struct file	*fp;
2538 
2539 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2540 
2541 		if (copyin(ct->d_args.desc_ptr, didpp, dsize)) {
2542 			kmem_free(start, dsize);
2543 			return (EFAULT);
2544 		}
2545 		while (ndid--) {
2546 			vnode_t	*vp;
2547 			int fd = didpp->d_data.d_desc.d_descriptor;
2548 
2549 			/*
2550 			 * We only understand file descriptors as passed objs
2551 			 */
2552 			if ((didpp->d_attributes & DOOR_DESCRIPTOR) &&
2553 			    (fp = getf(fd)) != NULL) {
2554 				didpp->d_data.d_handle = FTODH(fp);
2555 				/* Hold the door */
2556 				door_ki_hold(didpp->d_data.d_handle);
2557 
2558 				releasef(fd);
2559 
2560 				if (didpp->d_attributes & DOOR_RELEASE) {
2561 					/* release passed reference */
2562 					(void) closeandsetf(fd, NULL);
2563 				}
2564 
2565 				if (VOP_REALVP(fp->f_vnode, &vp))
2566 					vp = fp->f_vnode;
2567 
2568 				/* Set attributes */
2569 				didpp->d_attributes = DOOR_HANDLE |
2570 				    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
2571 			} else {
2572 				/* close translated references */
2573 				door_fd_close(start, didpp - start);
2574 				/* close untranslated references */
2575 				door_fd_rele(didpp, ndid + 1, 0);
2576 				kmem_free(start, dsize);
2577 				return (EINVAL);
2578 			}
2579 			didpp++;
2580 		}
2581 		ct->d_args.desc_ptr = start;
2582 	}
2583 	return (0);
2584 }
2585 
2586 /*
2587  * Translate door arguments from kernel to user.  This copies the passed
2588  * door handles.  It doesn't touch other data.  It is used by door_upcall,
2589  * and for data returned by a door_call to a kernel server.
2590  */
2591 static int
2592 door_translate_out(void)
2593 {
2594 	door_client_t *ct = DOOR_CLIENT(curthread->t_door);
2595 	uint_t	ndid;
2596 
2597 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2598 	ndid = ct->d_args.desc_num;
2599 	if (ndid > door_max_desc) {
2600 		door_fd_rele(ct->d_args.desc_ptr, ndid, 1);
2601 		return (E2BIG);
2602 	}
2603 	/*
2604 	 * Translate the door args into files
2605 	 */
2606 	if (ndid != 0) {
2607 		door_desc_t	*didpp = ct->d_args.desc_ptr;
2608 		struct file	**fpp;
2609 
2610 		ct->d_fpp_size = ndid * sizeof (struct file *);
2611 		fpp = ct->d_fpp = kmem_alloc(ct->d_fpp_size, KM_SLEEP);
2612 		while (ndid--) {
2613 			struct file *fp = NULL;
2614 			int fd = -1;
2615 
2616 			/*
2617 			 * We understand file descriptors and door
2618 			 * handles as passed objs.
2619 			 */
2620 			if (didpp->d_attributes & DOOR_DESCRIPTOR) {
2621 				fd = didpp->d_data.d_desc.d_descriptor;
2622 				fp = getf(fd);
2623 			} else if (didpp->d_attributes & DOOR_HANDLE)
2624 				fp = DHTOF(didpp->d_data.d_handle);
2625 			if (fp != NULL) {
2626 				/* Hold the fp */
2627 				mutex_enter(&fp->f_tlock);
2628 				fp->f_count++;
2629 				mutex_exit(&fp->f_tlock);
2630 
2631 				*fpp = fp;
2632 				if (didpp->d_attributes & DOOR_DESCRIPTOR)
2633 					releasef(fd);
2634 				if (didpp->d_attributes & DOOR_RELEASE) {
2635 					/* release passed reference */
2636 					if (fd >= 0)
2637 						(void) closeandsetf(fd, NULL);
2638 					else
2639 						(void) closef(fp);
2640 				}
2641 			} else {
2642 				/* close translated references */
2643 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2644 				/* close untranslated references */
2645 				door_fd_rele(didpp, ndid + 1, 1);
2646 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2647 				ct->d_fpp = NULL;
2648 				ct->d_fpp_size = 0;
2649 				return (EINVAL);
2650 			}
2651 			fpp++; didpp++;
2652 		}
2653 	}
2654 	return (0);
2655 }
2656 
2657 /*
2658  * Move the results from the server to the client
2659  */
2660 static int
2661 door_results(kthread_t *caller, caddr_t data_ptr, size_t data_size,
2662 		door_desc_t *desc_ptr, uint_t desc_num)
2663 {
2664 	door_client_t	*ct = DOOR_CLIENT(caller->t_door);
2665 	size_t		dsize;
2666 	size_t		rlen;
2667 	size_t		result_size;
2668 
2669 	ASSERT(DOOR_T_HELD(ct));
2670 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2671 
2672 	if (ct->d_noresults)
2673 		return (E2BIG);		/* No results expected */
2674 
2675 	if (desc_num > door_max_desc)
2676 		return (E2BIG);		/* Too many descriptors */
2677 
2678 	dsize = desc_num * sizeof (door_desc_t);
2679 	/*
2680 	 * Check if the results are bigger than the client's buffer
2681 	 */
2682 	if (dsize)
2683 		rlen = roundup(data_size, sizeof (door_desc_t));
2684 	else
2685 		rlen = data_size;
2686 	if ((result_size = rlen + dsize) == 0)
2687 		return (0);
2688 
2689 	if (ct->d_upcall) {
2690 		/*
2691 		 * Handle upcalls
2692 		 */
2693 		if (ct->d_args.rbuf == NULL || ct->d_args.rsize < result_size) {
2694 			/*
2695 			 * If there's no return buffer or the buffer is too
2696 			 * small, allocate a new one.  The old buffer (if it
2697 			 * exists) will be freed by the upcall client.
2698 			 */
2699 			if (result_size > door_max_upcall_reply)
2700 				return (E2BIG);
2701 			ct->d_args.rsize = result_size;
2702 			ct->d_args.rbuf = kmem_alloc(result_size, KM_SLEEP);
2703 		}
2704 		ct->d_args.data_ptr = ct->d_args.rbuf;
2705 		if (data_size != 0 &&
2706 		    copyin(data_ptr, ct->d_args.data_ptr, data_size) != 0)
2707 			return (EFAULT);
2708 	} else if (result_size > ct->d_args.rsize) {
2709 		return (door_overflow(caller, data_ptr, data_size,
2710 		    desc_ptr, desc_num));
2711 	} else if (data_size != 0) {
2712 		if (data_size <= door_max_arg) {
2713 			/*
2714 			 * Use a 2 copy method for small amounts of data
2715 			 */
2716 			if (ct->d_buf == NULL) {
2717 				ct->d_bufsize = data_size;
2718 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2719 			} else if (ct->d_bufsize < data_size) {
2720 				kmem_free(ct->d_buf, ct->d_bufsize);
2721 				ct->d_bufsize = data_size;
2722 				ct->d_buf = kmem_alloc(ct->d_bufsize, KM_SLEEP);
2723 			}
2724 			if (copyin(data_ptr, ct->d_buf, data_size) != 0)
2725 				return (EFAULT);
2726 		} else {
2727 			struct as *as = ttoproc(caller)->p_as;
2728 			caddr_t	dest = ct->d_args.rbuf;
2729 			caddr_t	src = data_ptr;
2730 			size_t	len = data_size;
2731 
2732 			/* Copy data directly into client */
2733 			while (len != 0) {
2734 				uint_t	amount;
2735 				uint_t	max;
2736 				uint_t	off;
2737 				int	error;
2738 
2739 				off = (uintptr_t)dest & PAGEOFFSET;
2740 				if (off)
2741 					max = PAGESIZE - off;
2742 				else
2743 					max = PAGESIZE;
2744 				amount = len > max ? max : len;
2745 				error = door_copy(as, src, dest, amount);
2746 				if (error != 0)
2747 					return (error);
2748 				dest += amount;
2749 				src += amount;
2750 				len -= amount;
2751 			}
2752 		}
2753 	}
2754 
2755 	/*
2756 	 * Copyin the returned descriptors and translate them into file pointers
2757 	 */
2758 	if (desc_num != 0) {
2759 		door_desc_t *start;
2760 		door_desc_t *didpp;
2761 		struct file **fpp;
2762 		size_t	fpp_size;
2763 		uint_t	i;
2764 
2765 		/* First, check if we would overflow client */
2766 		if (!ufcanalloc(ttoproc(caller), desc_num))
2767 			return (EMFILE);
2768 
2769 		start = didpp = kmem_alloc(dsize, KM_SLEEP);
2770 		if (copyin(desc_ptr, didpp, dsize)) {
2771 			kmem_free(start, dsize);
2772 			return (EFAULT);
2773 		}
2774 		fpp_size = desc_num * sizeof (struct file *);
2775 		if (fpp_size > ct->d_fpp_size) {
2776 			/* make more space */
2777 			if (ct->d_fpp_size)
2778 				kmem_free(ct->d_fpp, ct->d_fpp_size);
2779 			ct->d_fpp_size = fpp_size;
2780 			ct->d_fpp = kmem_alloc(fpp_size, KM_SLEEP);
2781 		}
2782 		fpp = ct->d_fpp;
2783 
2784 		for (i = 0; i < desc_num; i++) {
2785 			struct file *fp;
2786 			int fd = didpp->d_data.d_desc.d_descriptor;
2787 
2788 			/* Only understand file descriptor results */
2789 			if (!(didpp->d_attributes & DOOR_DESCRIPTOR) ||
2790 			    (fp = getf(fd)) == NULL) {
2791 				/* close translated references */
2792 				door_fp_close(ct->d_fpp, fpp - ct->d_fpp);
2793 				/* close untranslated references */
2794 				door_fd_rele(didpp, desc_num - i, 0);
2795 				kmem_free(start, dsize);
2796 				return (EINVAL);
2797 			}
2798 
2799 			mutex_enter(&fp->f_tlock);
2800 			fp->f_count++;
2801 			mutex_exit(&fp->f_tlock);
2802 
2803 			*fpp = fp;
2804 			releasef(fd);
2805 
2806 			if (didpp->d_attributes & DOOR_RELEASE) {
2807 				/* release passed reference */
2808 				(void) closeandsetf(fd, NULL);
2809 			}
2810 
2811 			fpp++; didpp++;
2812 		}
2813 		kmem_free(start, dsize);
2814 	}
2815 	return (0);
2816 }
2817 
2818 /*
2819  * Close all the descriptors.
2820  */
2821 static void
2822 door_fd_close(door_desc_t *d, uint_t n)
2823 {
2824 	uint_t	i;
2825 
2826 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2827 	for (i = 0; i < n; i++) {
2828 		if (d->d_attributes & DOOR_DESCRIPTOR) {
2829 			(void) closeandsetf(
2830 			    d->d_data.d_desc.d_descriptor, NULL);
2831 		} else if (d->d_attributes & DOOR_HANDLE) {
2832 			door_ki_rele(d->d_data.d_handle);
2833 		}
2834 		d++;
2835 	}
2836 }
2837 
2838 /*
2839  * Close descriptors that have the DOOR_RELEASE attribute set.
2840  */
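/*
 * Sender-side illustration (a sketch, not from this file): setting
 * DOOR_RELEASE on a passed descriptor asks that the sender's reference be
 * closed once the descriptor has been passed, e.g.
 *
 *	door_desc_t d;
 *
 *	d.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
 *	d.d_data.d_desc.d_descriptor = fd;
 */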
2841 void
2842 door_fd_rele(door_desc_t *d, uint_t n, int from_kernel)
2843 {
2844 	uint_t	i;
2845 
2846 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2847 	for (i = 0; i < n; i++) {
2848 		if (d->d_attributes & DOOR_RELEASE) {
2849 			if (d->d_attributes & DOOR_DESCRIPTOR) {
2850 				(void) closeandsetf(
2851 				    d->d_data.d_desc.d_descriptor, NULL);
2852 			} else if (from_kernel &&
2853 			    (d->d_attributes & DOOR_HANDLE)) {
2854 				door_ki_rele(d->d_data.d_handle);
2855 			}
2856 		}
2857 		d++;
2858 	}
2859 }
2860 
2861 /*
2862  * Copy descriptors into the kernel so we can release any marked
2863  * DOOR_RELEASE.
2864  */
2865 int
2866 door_release_fds(door_desc_t *desc_ptr, uint_t ndesc)
2867 {
2868 	size_t dsize;
2869 	door_desc_t *didpp;
2870 	uint_t desc_num;
2871 
2872 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2873 	ASSERT(ndesc != 0);
2874 
2875 	desc_num = MIN(ndesc, door_max_desc);
2876 
2877 	dsize = desc_num * sizeof (door_desc_t);
2878 	didpp = kmem_alloc(dsize, KM_SLEEP);
2879 
2880 	while (ndesc > 0) {
2881 		uint_t count = MIN(ndesc, desc_num);
2882 
2883 		if (copyin(desc_ptr, didpp, count * sizeof (door_desc_t))) {
2884 			kmem_free(didpp, dsize);
2885 			return (EFAULT);
2886 		}
2887 		door_fd_rele(didpp, count, 0);
2888 
2889 		ndesc -= count;
2890 		desc_ptr += count;
2891 	}
2892 	kmem_free(didpp, dsize);
2893 	return (0);
2894 }
2895 
2896 /*
2897  * Decrement ref count on all the files passed
2898  */
2899 static void
2900 door_fp_close(struct file **fp, uint_t n)
2901 {
2902 	uint_t	i;
2903 
2904 	ASSERT(MUTEX_NOT_HELD(&door_knob));
2905 
2906 	for (i = 0; i < n; i++)
2907 		(void) closef(fp[i]);
2908 }
2909 
2910 /*
2911  * Copy data from 'src' in current address space to 'dest' in 'as' for 'len'
2912  * bytes.
2913  *
2914  * Performs this using 1 mapin and 1 copy operation.
2915  *
2916  * We really should do more than 1 page at a time to improve
2917  * performance, but for now this is treated as an anomalous condition.
2918  */
2919 static int
2920 door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len)
2921 {
2922 	caddr_t	kaddr;
2923 	caddr_t	rdest;
2924 	uint_t	off;
2925 	page_t	**pplist;
2926 	page_t	*pp = NULL;
2927 	int	error = 0;
2928 
2929 	ASSERT(len <= PAGESIZE);
2930 	off = (uintptr_t)dest & PAGEOFFSET;	/* offset within the page */
2931 	rdest = (caddr_t)((uintptr_t)dest &
2932 	    (uintptr_t)PAGEMASK);	/* Page boundary */
2933 	ASSERT(off + len <= PAGESIZE);
2934 
2935 	/*
2936 	 * Lock down destination page.
2937 	 */
2938 	if (as_pagelock(as, &pplist, rdest, PAGESIZE, S_WRITE))
2939 		return (E2BIG);
2940 	/*
2941 	 * Check if we have a shadow page list from as_pagelock. If not,
2942 	 * we took the slow path and have to find our page struct the hard
2943 	 * way.
2944 	 */
2945 	if (pplist == NULL) {
2946 		pfn_t	pfnum;
2947 
2948 		/* MMU mapping is already locked down */
2949 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2950 		pfnum = hat_getpfnum(as->a_hat, rdest);
2951 		AS_LOCK_EXIT(as, &as->a_lock);
2952 
2953 		/*
2954 		 * TODO: The pfn step should not be necessary - need
2955 		 * a hat_getpp() function.
2956 		 */
2957 		if (pf_is_memory(pfnum)) {
2958 			pp = page_numtopp_nolock(pfnum);
2959 			ASSERT(pp == NULL || PAGE_LOCKED(pp));
2960 		} else
2961 			pp = NULL;
2962 		if (pp == NULL) {
2963 			as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
2964 			return (E2BIG);
2965 		}
2966 	} else {
2967 		pp = *pplist;
2968 	}
2969 	/*
2970 	 * Map destination page into kernel address
2971 	 */
2972 	kaddr = (caddr_t)ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
2973 
2974 	/*
2975 	 * Copy from src to dest
2976 	 */
2977 	if (copyin(src, kaddr + off, len) != 0)
2978 		error = EFAULT;
2979 	/*
2980 	 * Unmap destination page from kernel
2981 	 */
2982 	ppmapout(kaddr);
2983 	/*
2984 	 * Unlock destination page
2985 	 */
2986 	as_pageunlock(as, pplist, rdest, PAGESIZE, S_WRITE);
2987 	return (error);
2988 }
2989 
2990 /*
2991  * General kernel upcall using doors
2992  *	Returns 0 on success, errno for failures.
2993  *	Caller must have a hold on the door based vnode, and on any
2994  *	references passed in desc_ptr.  The references are released
2995  *	in the event of an error, and passed without duplication
2996  *	otherwise.  Note that param->rbuf must be 64-bit aligned in
2997  *	a 64-bit kernel, since it may be used to store door descriptors
2998  *	if they are returned by the server.
2999  */
3000 int
3001 door_upcall(vnode_t *vp, door_arg_t *param)
3002 {
3003 	/* Locals */
3004 	door_node_t	*dp;
3005 	kthread_t	*server_thread;
3006 	int		error = 0;
3007 	klwp_t		*lwp;
3008 	door_client_t	*ct;		/* curthread door_data */
3009 	door_server_t	*st;		/* server thread door_data */
3010 	int		gotresults = 0;
3011 
3012 	if (vp->v_type != VDOOR) {
3013 		if (param->desc_num)
3014 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3015 		return (EINVAL);
3016 	}
3017 
3018 	lwp = ttolwp(curthread);
3019 	ct = door_my_client(1);
3020 	dp = VTOD(vp);	/* Convert to a door_node_t */
3021 
3022 	mutex_enter(&door_knob);
3023 	if (DOOR_INVALID(dp)) {
3024 		mutex_exit(&door_knob);
3025 		if (param->desc_num)
3026 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3027 		error = EBADF;
3028 		goto out;
3029 	}
3030 
3031 	if (dp->door_target == &p0) {
3032 		/* Can't do an upcall to a kernel server */
3033 		mutex_exit(&door_knob);
3034 		if (param->desc_num)
3035 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3036 		error = EINVAL;
3037 		goto out;
3038 	}
3039 
3040 	error = door_check_limits(dp, param, 1);
3041 	if (error != 0) {
3042 		mutex_exit(&door_knob);
3043 		if (param->desc_num)
3044 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3045 		goto out;
3046 	}
3047 
3048 	/*
3049 	 * Get a server thread from the target domain
3050 	 */
3051 	if ((server_thread = door_get_server(dp)) == NULL) {
3052 		if (DOOR_INVALID(dp))
3053 			error = EBADF;
3054 		else
3055 			error = EAGAIN;
3056 		mutex_exit(&door_knob);
3057 		if (param->desc_num)
3058 			door_fd_rele(param->desc_ptr, param->desc_num, 1);
3059 		goto out;
3060 	}
3061 
3062 	st = DOOR_SERVER(server_thread->t_door);
3063 	ct->d_buf = param->data_ptr;
3064 	ct->d_bufsize = param->data_size;
3065 	ct->d_args = *param;	/* structure assignment */
3066 
3067 	if (ct->d_args.desc_num) {
3068 		/*
3069 		 * Move data from client to server
3070 		 */
3071 		DOOR_T_HOLD(st);
3072 		mutex_exit(&door_knob);
3073 		error = door_translate_out();
3074 		mutex_enter(&door_knob);
3075 		DOOR_T_RELEASE(st);
3076 		if (error) {
3077 			/*
3078 			 * We're not going to resume this thread after all
3079 			 */
3080 			door_release_server(dp, server_thread);
3081 			shuttle_sleep(server_thread);
3082 			mutex_exit(&door_knob);
3083 			goto out;
3084 		}
3085 	}
3086 
3087 	ct->d_upcall = 1;
3088 	if (param->rsize == 0)
3089 		ct->d_noresults = 1;
3090 	else
3091 		ct->d_noresults = 0;
3092 
3093 	dp->door_active++;
3094 
3095 	ct->d_error = DOOR_WAIT;
3096 	st->d_caller = curthread;
3097 	st->d_active = dp;
3098 
3099 	shuttle_resume(server_thread, &door_knob);
3100 
3101 	mutex_enter(&door_knob);
3102 shuttle_return:
3103 	if ((error = ct->d_error) < 0) {	/* DOOR_WAIT or DOOR_EXIT */
3104 		/*
3105 		 * Premature wakeup. Find out why (stop, forkall, sig, exit ...)
3106 		 */
3107 		mutex_exit(&door_knob);		/* May block in ISSIG */
3108 		if (lwp && (ISSIG(curthread, FORREAL) ||
3109 		    lwp->lwp_sysabort || MUSTRETURN(curproc, curthread))) {
3110 			/* Signal, forkall, ... */
3111 			lwp->lwp_sysabort = 0;
3112 			mutex_enter(&door_knob);
3113 			error = EINTR;
3114 			/*
3115 			 * If the server has finished processing our call,
3116 			 * or exited (calling door_slam()), then d_error
3117 			 * will have changed.  If the server hasn't finished
3118 			 * yet, d_error will still be DOOR_WAIT, and we
3119 			 * let it know we are not interested in any
3120 			 * results by sending a SIGCANCEL, unless the door
3121 			 * is marked with DOOR_NO_CANCEL.
3122 			 */
3123 			if (ct->d_error == DOOR_WAIT &&
3124 			    st->d_caller == curthread) {
3125 				proc_t	*p = ttoproc(server_thread);
3126 
3127 				st->d_active = NULL;
3128 				st->d_caller = NULL;
3129 				if (!(dp->door_flags & DOOR_NO_CANCEL)) {
3130 					DOOR_T_HOLD(st);
3131 					mutex_exit(&door_knob);
3132 
3133 					mutex_enter(&p->p_lock);
3134 					sigtoproc(p, server_thread, SIGCANCEL);
3135 					mutex_exit(&p->p_lock);
3136 
3137 					mutex_enter(&door_knob);
3138 					DOOR_T_RELEASE(st);
3139 				}
3140 			}
3141 		} else {
3142 			/*
3143 			 * Return from stop(), server exit...
3144 			 *
3145 			 * Note that the server could have done a
3146 			 * door_return while the client was in stop state
3147 			 * (ISSIG), in which case the error condition
3148 			 * is updated by the server.
3149 			 */
3150 			mutex_enter(&door_knob);
3151 			if (ct->d_error == DOOR_WAIT) {
3152 				/* Still waiting for a reply */
3153 				shuttle_swtch(&door_knob);
3154 				mutex_enter(&door_knob);
3155 				if (lwp)
3156 					lwp->lwp_asleep = 0;
3157 				goto	shuttle_return;
3158 			} else if (ct->d_error == DOOR_EXIT) {
3159 				/* Server exit */
3160 				error = EINTR;
3161 			} else {
3162 				/* Server did a door_return during ISSIG */
3163 				error = ct->d_error;
3164 			}
3165 		}
3166 		/*
3167 		 * Can't exit if the server is currently copying
3168 		 * results for me
3169 		 */
3170 		while (DOOR_T_HELD(ct))
3171 			cv_wait(&ct->d_cv, &door_knob);
3172 
3173 		/*
3174 		 * Find out if results were successfully copied.
3175 		 */
3176 		if (ct->d_error == 0)
3177 			gotresults = 1;
3178 	}
3179 	if (lwp) {
3180 		lwp->lwp_asleep = 0;		/* /proc */
3181 		lwp->lwp_sysabort = 0;		/* /proc */
3182 	}
3183 	if (--dp->door_active == 0 && (dp->door_flags & DOOR_DELAY))
3184 		door_deliver_unref(dp);
3185 	mutex_exit(&door_knob);
3186 
3187 	/*
3188 	 * Translate returned doors (if any)
3189 	 */
3190 
3191 	if (ct->d_noresults)
3192 		goto out;
3193 
3194 	if (error) {
3195 		/*
3196 		 * If server returned results successfully, then we've
3197 		 * been interrupted and may need to clean up.
3198 		 */
3199 		if (gotresults) {
3200 			ASSERT(error == EINTR);
3201 			door_fp_close(ct->d_fpp, ct->d_args.desc_num);
3202 		}
3203 		goto out;
3204 	}
3205 
3206 	if (ct->d_args.desc_num) {
3207 		struct file	**fpp;
3208 		door_desc_t	*didpp;
3209 		vnode_t		*vp;
3210 		uint_t		n = ct->d_args.desc_num;
3211 
3212 		didpp = ct->d_args.desc_ptr = (door_desc_t *)(ct->d_args.rbuf +
3213 		    roundup(ct->d_args.data_size, sizeof (door_desc_t)));
3214 		fpp = ct->d_fpp;
3215 
3216 		while (n--) {
3217 			struct file *fp;
3218 
3219 			fp = *fpp;
3220 			if (VOP_REALVP(fp->f_vnode, &vp))
3221 				vp = fp->f_vnode;
3222 
3223 			didpp->d_attributes = DOOR_HANDLE |
3224 			    (VTOD(vp)->door_flags & DOOR_ATTR_MASK);
3225 			didpp->d_data.d_handle = FTODH(fp);
3226 
3227 			fpp++; didpp++;
3228 		}
3229 	}
3230 
3231 	/* on return data is in rbuf */
3232 	*param = ct->d_args;		/* structure assignment */
3233 
3234 out:
3235 	if (ct->d_fpp) {
3236 		kmem_free(ct->d_fpp, ct->d_fpp_size);
3237 		ct->d_fpp = NULL;
3238 		ct->d_fpp_size = 0;
3239 	}
3240 
3241 	ct->d_upcall = 0;
3242 	ct->d_noresults = 0;
3243 	ct->d_buf = NULL;
3244 	ct->d_bufsize = 0;
3245 	return (error);
3246 }
3247 
3248 /*
3249  * Add a door to the per-process list of active doors for which the
3250  * process is a server.
3251  */
3252 static void
3253 door_list_insert(door_node_t *dp)
3254 {
3255 	proc_t *p = dp->door_target;
3256 
3257 	ASSERT(MUTEX_HELD(&door_knob));
3258 	dp->door_list = p->p_door_list;
3259 	p->p_door_list = dp;
3260 }
3261 
3262 /*
3263  * Remove a door from the per-process list of active doors.
3264  */
3265 void
3266 door_list_delete(door_node_t *dp)
3267 {
3268 	door_node_t **pp;
3269 
3270 	ASSERT(MUTEX_HELD(&door_knob));
3271 	/*
3272 	 * Find the door in the list.  If the door belongs to another process,
3273 	 * it's OK to use p_door_list since that process can't exit until all
3274 	 * doors have been taken off the list (see door_exit).
3275 	 */
3276 	pp = &(dp->door_target->p_door_list);
3277 	while (*pp != dp)
3278 		pp = &((*pp)->door_list);
3279 
3280 	/* found it, take it off the list */
3281 	*pp = dp->door_list;
3282 }
3283 
3284 
3285 /*
3286  * External kernel interfaces for doors.  These functions are available
3287  * outside the doorfs module for use in creating and using doors from
3288  * within the kernel.
3289  */
3290 
3291 /*
3292  * door_ki_upcall invokes a user-level door server from the kernel.
3293  */
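/*
 * Minimal usage sketch, assuming a user-level server has already made its
 * door available at a known path (the path, request layout, and sizes
 * below are hypothetical).  See door_upcall() above for the full caller
 * contract, including the 64-bit alignment requirement on rbuf when
 * descriptors may be returned:
 *
 *	door_handle_t dh;
 *	door_arg_t da;
 *	char req[32], rbuf[128];
 *	int err;
 *
 *	if ((err = door_ki_open("/var/run/example_door", &dh)) != 0)
 *		return (err);
 *	da.data_ptr = req;
 *	da.data_size = sizeof (req);
 *	da.desc_ptr = NULL;
 *	da.desc_num = 0;
 *	da.rbuf = rbuf;
 *	da.rsize = sizeof (rbuf);
 *	err = door_ki_upcall(dh, &da);
 *	door_ki_rele(dh);
 *
 * On success the reply is found through da.data_ptr/da.rbuf; if it did not
 * fit in the supplied buffer, door_upcall() substitutes a kmem_alloc'd
 * buffer that the caller is then expected to kmem_free().
 */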
3294 int
3295 door_ki_upcall(door_handle_t dh, door_arg_t *param)
3296 {
3297 	file_t *fp = DHTOF(dh);
3298 	vnode_t *realvp;
3299 
3300 	if (VOP_REALVP(fp->f_vnode, &realvp))
3301 		realvp = fp->f_vnode;
3302 	return (door_upcall(realvp, param));
3303 }
3304 
3305 /*
3306  * Function call to create a "kernel" door server.  A kernel door
3307  * server provides a way for a user-level process to invoke a function
3308  * in the kernel through a door_call.  From the caller's point of
3309  * view, a kernel door server looks the same as a user-level one
3310  * (except the server pid is 0).  Unlike normal door calls, the
3311  * kernel door function is invoked via a normal function call in the
3312  * same thread and context as the caller.
3313  */
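/*
 * A minimal creation sketch (names are hypothetical).  The server routine
 * below follows the argument shape used for unref delivery earlier in this
 * file (cookie, door_arg_t pointer, and three trailing parameters that are
 * passed as NULL there); the trailing parameter types are an assumption of
 * this sketch, not a statement of the interface:
 *
 *	static void
 *	my_kdoor_server(void *cookie, door_arg_t *args,
 *	    void *a3, void *a4, void *a5)
 *	{
 *		if (args->data_ptr == DOOR_UNREF_DATA) {
 *			... last user reference went away ...
 *			return;
 *		}
 *		... build the reply in *args ...
 *	}
 *
 *	door_handle_t dh;
 *	int err;
 *
 *	err = door_ki_create((void (*)())my_kdoor_server, my_cookie,
 *	    DOOR_UNREF, &dh);
 */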
3314 int
3315 door_ki_create(void (*pc_cookie)(), void *data_cookie, uint_t attributes,
3316     door_handle_t *dhp)
3317 {
3318 	int err;
3319 	file_t *fp;
3320 
3321 	/* no DOOR_PRIVATE */
3322 	if ((attributes & ~DOOR_KI_CREATE_MASK) ||
3323 	    (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) ==
3324 	    (DOOR_UNREF | DOOR_UNREF_MULTI))
3325 		return (EINVAL);
3326 
3327 	err = door_create_common(pc_cookie, data_cookie, attributes,
3328 	    1, NULL, &fp);
3329 	if (err == 0 && (attributes & (DOOR_UNREF | DOOR_UNREF_MULTI)) &&
3330 	    p0.p_unref_thread == 0) {
3331 		/* need to create unref thread for process 0 */
3332 		(void) thread_create(NULL, 0, door_unref_kernel, NULL, 0, &p0,
3333 		    TS_RUN, minclsyspri);
3334 	}
3335 	if (err == 0) {
3336 		*dhp = FTODH(fp);
3337 	}
3338 	return (err);
3339 }
3340 
3341 void
3342 door_ki_hold(door_handle_t dh)
3343 {
3344 	file_t *fp = DHTOF(dh);
3345 
3346 	mutex_enter(&fp->f_tlock);
3347 	fp->f_count++;
3348 	mutex_exit(&fp->f_tlock);
3349 }
3350 
3351 void
3352 door_ki_rele(door_handle_t dh)
3353 {
3354 	file_t *fp = DHTOF(dh);
3355 
3356 	(void) closef(fp);
3357 }
3358 
3359 int
3360 door_ki_open(char *pathname, door_handle_t *dhp)
3361 {
3362 	file_t *fp;
3363 	vnode_t *vp;
3364 	int err;
3365 
3366 	if ((err = lookupname(pathname, UIO_SYSSPACE, FOLLOW, NULL, &vp)) != 0)
3367 		return (err);
3368 	if (err = VOP_OPEN(&vp, FREAD, kcred)) {
3369 		VN_RELE(vp);
3370 		return (err);
3371 	}
3372 	if (vp->v_type != VDOOR) {
3373 		VN_RELE(vp);
3374 		return (EINVAL);
3375 	}
3376 	if ((err = falloc(vp, FREAD | FWRITE, &fp, NULL)) != 0) {
3377 		VN_RELE(vp);
3378 		return (err);
3379 	}
3380 	/* falloc returns with f_tlock held on success */
3381 	mutex_exit(&fp->f_tlock);
3382 	*dhp = FTODH(fp);
3383 	return (0);
3384 }
3385 
3386 int
3387 door_ki_info(door_handle_t dh, struct door_info *dip)
3388 {
3389 	file_t *fp = DHTOF(dh);
3390 	vnode_t *vp;
3391 
3392 	if (VOP_REALVP(fp->f_vnode, &vp))
3393 		vp = fp->f_vnode;
3394 	if (vp->v_type != VDOOR)
3395 		return (EINVAL);
3396 	door_info_common(VTOD(vp), dip, fp);
3397 	return (0);
3398 }
3399 
3400 door_handle_t
3401 door_ki_lookup(int did)
3402 {
3403 	file_t *fp;
3404 	door_handle_t dh;
3405 
3406 	/* is the descriptor really a door? */
3407 	if (door_lookup(did, &fp) == NULL)
3408 		return (NULL);
3409 	/* got the door, put a hold on it and release the fd */
3410 	dh = FTODH(fp);
3411 	door_ki_hold(dh);
3412 	releasef(did);
3413 	return (dh);
3414 }
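
/*
 * Typical use of door_ki_lookup() (a sketch; the surrounding ioctl/syscall
 * context is hypothetical): a kernel subsystem handed a door descriptor
 * from user level converts it into a handle it can keep after the call
 * returns:
 *
 *	door_handle_t dh;
 *
 *	if ((dh = door_ki_lookup(fd)) == NULL)
 *		return (EBADF);
 *	... stash dh and use door_ki_upcall(dh, ...) later ...
 *	door_ki_rele(dh);
 */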
3415 
3416 int
3417 door_ki_setparam(door_handle_t dh, int type, size_t val)
3418 {
3419 	file_t *fp = DHTOF(dh);
3420 	vnode_t *vp;
3421 
3422 	if (VOP_REALVP(fp->f_vnode, &vp))
3423 		vp = fp->f_vnode;
3424 	if (vp->v_type != VDOOR)
3425 		return (EINVAL);
3426 	return (door_setparam_common(VTOD(vp), 1, type, val));
3427 }
3428 
3429 int
3430 door_ki_getparam(door_handle_t dh, int type, size_t *out)
3431 {
3432 	file_t *fp = DHTOF(dh);
3433 	vnode_t *vp;
3434 
3435 	if (VOP_REALVP(fp->f_vnode, &vp))
3436 		vp = fp->f_vnode;
3437 	if (vp->v_type != VDOOR)
3438 		return (EINVAL);
3439 	return (door_getparam_common(VTOD(vp), type, out));
3440 }
3441