xref: /original-bsd/sys/kern/uipc_usrreq.c (revision 612877e1)
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_usrreq.c	8.3 (Berkeley) 01/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>

/*
 * Unix communications domain.
 *
 * TODO:
 *	SEQPACKET, RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
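#ifdef notdef
/*
 * Illustrative userland sketch only, not part of this file: how a
 * client reaches a socket that a server has bound into the file system
 * with bind(2) (see unp_bind() and unp_connect() below).  The pathname
 * "/tmp/example.sock" is hypothetical.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>

static int
example_connect()
{
	struct sockaddr_un sun;
	int s;

	s = socket(AF_UNIX, SOCK_STREAM, 0);
	if (s < 0)
		return (-1);
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strncpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path) - 1);
	if (connect(s, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
		(void) close(s);
		return (-1);
	}
	return (s);
}
#endif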
struct	sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };
ino_t	unp_ino;			/* prototype for fake inode numbers */

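/*
 * Protocol user-request routine for the Unix domain: dispatch on the
 * PRU_* request code, operating on the socket's unpcb.  Called from
 * the socket layer through the protocol switch; returns an errno.
 */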
/*ARGSUSED*/
uipc_usrreq(so, req, m, nam, control)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *control;
{
	struct unpcb *unp = sotounpcb(so);
	register struct socket *so2;
	register int error = 0;
	struct proc *p = curproc;	/* XXX */

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);
	if (req != PRU_SEND && control && control->m_len) {
		error = EOPNOTSUPP;
		goto release;
	}
	if (unp == 0 && req != PRU_ATTACH) {
		error = EINVAL;
		goto release;
	}
	switch (req) {

	case PRU_ATTACH:
		if (unp) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		error = unp_bind(unp, nam, p);
		break;

	case PRU_LISTEN:
		if (unp->unp_vnode == 0)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		error = unp_connect(so, nam, p);
		break;

	case PRU_CONNECT2:
		error = unp_connect2(so, (struct socket *)nam);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else {
			nam->m_len = sizeof(sun_noname);
			*(mtod(nam, struct sockaddr *)) = sun_noname;
		}
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define snd (&so2->so_snd)
			if (unp->unp_conn == 0)
				break;
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Adjust backpressure on sender
			 * and wake up anyone waiting to write.
			 */
			snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
			unp->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat += unp->unp_cc - rcv->sb_cc;
			unp->unp_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		if (control && (error = unp_internalize(control, p)))
			break;
		switch (so->so_type) {

		case SOCK_DGRAM: {
			struct sockaddr *from;

			if (nam) {
				if (unp->unp_conn) {
					error = EISCONN;
					break;
				}
				error = unp_connect(so, nam, p);
				if (error)
					break;
			} else {
				if (unp->unp_conn == 0) {
					error = ENOTCONN;
					break;
				}
			}
			so2 = unp->unp_conn->unp_socket;
			if (unp->unp_addr)
				from = mtod(unp->unp_addr, struct sockaddr *);
			else
				from = &sun_noname;
			if (sbappendaddr(&so2->so_rcv, from, m, control)) {
				sorwakeup(so2);
				m = 0;
				control = 0;
			} else
				error = ENOBUFS;
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				break;
			}
			if (unp->unp_conn == 0)
				panic("uipc 3");
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Send to paired receive port, and then reduce
			 * send buffer hiwater marks to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control))
					control = 0;
			} else
				sbappend(rcv, m);
			snd->sb_mbmax -=
			    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
			unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat -= rcv->sb_cc - unp->unp_conn->unp_cc;
			unp->unp_conn->unp_cc = rcv->sb_cc;
			sorwakeup(so2);
			m = 0;
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 4");
		}
		break;

	case PRU_ABORT:
		unp_drop(unp, ECONNABORTED);
		break;

	case PRU_SENSE:
		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
		if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
			so2 = unp->unp_conn->unp_socket;
			((struct stat *) m)->st_blksize += so2->so_rcv.sb_cc;
		}
		((struct stat *) m)->st_dev = NODEV;
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		((struct stat *) m)->st_ino = unp->unp_ino;
		return (0);

	case PRU_RCVOOB:
		return (EOPNOTSUPP);

	case PRU_SENDOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		if (unp->unp_addr) {
			nam->m_len = unp->unp_addr->m_len;
			bcopy(mtod(unp->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_PEERADDR:
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_SLOWTIMO:
		break;

	default:
		panic("uipc_usrreq");
	}
release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return (error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * effectively only PIPSIZ: stream data is appended directly to the peer's
 * receive buffer, so the send buffer's limits serve only for backpressure
 * accounting.
 * Datagram sockets use the sendspace only as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;

int	unp_rights;			/* file descriptors in flight */

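/*
 * Allocate a unpcb for a newly attached socket, reserving default
 * socket buffer space if none has been set.
 */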
unp_attach(so)
	struct socket *so;
{
	register struct mbuf *m;
	register struct unpcb *unp;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	m = m_getclr(M_DONTWAIT, MT_PCB);
	if (m == NULL)
		return (ENOBUFS);
	unp = mtod(m, struct unpcb *);
	so->so_pcb = (caddr_t)unp;
	unp->unp_socket = so;
	return (0);
}

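/*
 * Tear down a unpcb: release any bound vnode, disconnect from the peer
 * and from any datagram senders, and free the control block.  If any
 * descriptors are still in flight, flush our receive buffer and run
 * the garbage collector.
 */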
unp_detach(unp)
	register struct unpcb *unp;
{

	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = 0;
		vrele(unp->unp_vnode);
		unp->unp_vnode = 0;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (unp->unp_refs)
		unp_drop(unp->unp_refs, ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = 0;
	m_freem(unp->unp_addr);
	(void) m_free(dtom(unp));
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		unp_gc();
	}
}

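/*
 * Bind a socket to a pathname: create a VSOCK vnode for the name and
 * cross-link it with the socket.  Fails with EADDRINUSE if the name
 * already exists.
 */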
unp_bind(unp, nam, p)
	struct unpcb *unp;
	struct mbuf *nam;
	struct proc *p;
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;

	NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT, UIO_SYSSPACE,
		soun->sun_path, p);
	if (unp->unp_vnode != NULL)
		return (EINVAL);
	if (nam->m_len == MLEN) {
		if (*(mtod(nam, caddr_t) + nam->m_len - 1) != 0)
			return (EINVAL);
	} else
		*(mtod(nam, caddr_t) + nam->m_len) = 0;
/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		return (EADDRINUSE);
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS;
	LEASE_CHECK(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	if (error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr))
		return (error);
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addr = m_copy(nam, 0, (int)M_COPYALL);
	VOP_UNLOCK(vp);
	return (0);
}

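/*
 * Connect a socket to a pathname: look up the name, check that it is
 * a socket the caller may write to, and splice the two sockets
 * together.  For connection-oriented sockets a new server socket is
 * cloned from the listener with sonewconn().
 */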
unp_connect(so, nam, p)
	struct socket *so;
	struct mbuf *nam;
	struct proc *p;
{
	register struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	register struct vnode *vp;
	register struct socket *so2, *so3;
	struct unpcb *unp2, *unp3;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	if (nam->m_data + nam->m_len == &nam->m_dat[MLEN]) {	/* XXX */
		if (*(mtod(nam, caddr_t) + nam->m_len - 1) != 0)
			return (EMSGSIZE);
	} else
		*(mtod(nam, caddr_t) + nam->m_len) = 0;
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	if (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p))
		goto bad;
	so2 = vp->v_socket;
	if (so2 == 0) {
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr =
				  m_copy(unp2->unp_addr, 0, (int)M_COPYALL);
		so2 = so3;
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

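/*
 * Wire two sockets together.  Datagram sockets get a one-way reference
 * (the connector is added to the peer's unp_refs list); stream sockets
 * are connected in both directions.
 */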
unp_connect2(so, so2)
	register struct socket *so;
	register struct socket *so2;
{
	register struct unpcb *unp = sotounpcb(so);
	register struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

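/*
 * Undo unp_connect2(): detach from the peer, removing ourselves from
 * its unp_refs list for datagram sockets, or marking both ends
 * disconnected for stream sockets.
 */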
unp_disconnect(unp)
	struct unpcb *unp;
{
	register struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == 0)
		return;
	unp->unp_conn = 0;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			unp2 = unp2->unp_refs;
			for (;;) {
				if (unp2 == 0)
					panic("unp_disconnect");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = 0;
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
unp_abort(unp)
	struct unpcb *unp;
{

	unp_detach(unp);
}
#endif

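/*
 * Propagate a local PRU_SHUTDOWN to the connected stream peer: once we
 * can send no more, the peer can receive no more.
 */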
unp_shutdown(unp)
	struct unpcb *unp;
{
	struct socket *so;

	if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
	    (so = unp->unp_conn->unp_socket))
		socantrcvmore(so);
}

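/*
 * Report an error on the socket and disconnect it.  If the socket is
 * still sitting on a listener's queue (so_head is set), free the pcb
 * and the socket outright.
 */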
unp_drop(unp, errno)
	struct unpcb *unp;
	int errno;
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = (caddr_t) 0;
		m_freem(unp->unp_addr);
		(void) m_free(dtom(unp));
		sofree(so);
	}
}

#ifdef notdef
unp_drain()
{

}
#endif

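/*
 * Convert the file pointers carried in an SCM_RIGHTS message into
 * descriptors in the receiving process's file table.  If the table
 * cannot hold them all, discard the whole batch.
 */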
unp_externalize(rights)
	struct mbuf *rights;
{
	struct proc *p = curproc;		/* XXX */
	register int i;
	register struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	register struct file **rp = (struct file **)(cm + 1);
	register struct file *fp;
	int newfds = (cm->cmsg_len - sizeof(*cm)) / sizeof (int);
	int f;

	if (!fdavail(p, newfds)) {
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			unp_discard(fp);
			*rp++ = 0;
		}
		return (EMSGSIZE);
	}
	for (i = 0; i < newfds; i++) {
		if (fdalloc(p, 0, &f))
			panic("unp_externalize");
		fp = *rp;
		p->p_fd->fd_ofiles[f] = fp;
		fp->f_msgcount--;
		unp_rights--;
		*(int *)rp++ = f;
	}
	return (0);
}

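/*
 * Convert the descriptor numbers in an outgoing SCM_RIGHTS message
 * into file pointers, taking a reference on each file and accounting
 * for the rights now in flight.
 */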
unp_internalize(control, p)
	struct mbuf *control;
	struct proc *p;
{
	struct filedesc *fdp = p->p_fd;
	register struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	register struct file **rp;
	register struct file *fp;
	register int i, fd;
	int oldfds;

	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    cm->cmsg_len != control->m_len)
		return (EINVAL);
	oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
	rp = (struct file **)(cm + 1);
	for (i = 0; i < oldfds; i++) {
		fd = *(int *)rp++;
		if ((unsigned)fd >= fdp->fd_nfiles ||
		    fdp->fd_ofiles[fd] == NULL)
			return (EBADF);
	}
	rp = (struct file **)(cm + 1);
	for (i = 0; i < oldfds; i++) {
		fp = fdp->fd_ofiles[*(int *)rp];
		*rp++ = fp;
		fp->f_count++;
		fp->f_msgcount++;
		unp_rights++;
	}
	return (0);
}

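#ifdef notdef
/*
 * Illustrative userland sketch only, not part of this file: sending
 * one descriptor over a Unix domain socket with sendmsg(2).  The
 * rights are internalized by unp_internalize() above on send and
 * handed out again by unp_externalize() on receipt.  The names "s"
 * and "fd_to_send" are assumed to be an already connected socket and
 * an open descriptor.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
example_send_fd(s, fd_to_send)
	int s, fd_to_send;
{
	struct msghdr msg;
	struct iovec iov;
	union {
		struct cmsghdr cm;
		char space[sizeof(struct cmsghdr) + sizeof(int)];
	} u;
	char c = 0;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &c;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	u.cm.cmsg_len = sizeof(u);		/* header plus one int */
	u.cm.cmsg_level = SOL_SOCKET;
	u.cm.cmsg_type = SCM_RIGHTS;
	memcpy(&u.cm + 1, &fd_to_send, sizeof(int));
	msg.msg_control = (caddr_t)&u;
	msg.msg_controllen = sizeof(u);
	return (sendmsg(s, &msg, 0));
}
#endif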
int	unp_defer, unp_gcing;
int	unp_mark();
extern	struct domain unixdomain;

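/*
 * Garbage collect descriptors that are lost because they are carried
 * only in SCM_RIGHTS messages queued on sockets that are themselves
 * inaccessible.  Uses a mark-and-sweep over the file table; see the
 * long comment below for why the unreachable entries must be given an
 * extra reference before their queued rights are flushed.
 */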
unp_gc()
{
	register struct file *fp, *nextfp;
	register struct socket *so;
	struct file **extra_ref, **fpp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;
	for (fp = filehead; fp; fp = fp->f_filef)
		fp->f_flag &= ~(FMARK|FDEFER);
	do {
		for (fp = filehead; fp; fp = fp->f_filef) {
			if (fp->f_count == 0)
				continue;
			if (fp->f_flag & FDEFER) {
				fp->f_flag &= ~FDEFER;
				unp_defer--;
			} else {
				if (fp->f_flag & FMARK)
					continue;
				if (fp->f_count == fp->f_msgcount)
					continue;
				fp->f_flag |= FMARK;
			}
			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == 0)
				continue;
			if (so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			unp_scan(so->so_rcv.sb_mb, unp_mark);
		}
	} while (unp_defer);
	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect simply to unp_discard each entry f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.  Soclose first calls (through the protocol switch,
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  Sorflush, via the dom_dispose switch,
	 * calls unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the socket has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourselves, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc. will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	for (nunref = 0, fp = filehead, fpp = extra_ref; fp; fp = nextfp) {
		nextfp = fp->f_filef;
		if (fp->f_count == 0)
			continue;
		if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			fp->f_count++;
		}
	}
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		sorflush((struct socket *)(*fpp)->f_data);
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		closef(*fpp, (struct proc *)NULL);
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}

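/*
 * Dispose of the rights carried in an mbuf chain that is being thrown
 * away (the domain's dom_dispose entry, called from sorflush).
 */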
unp_dispose(m)
	struct mbuf *m;
{
	int unp_discard();

	if (m)
		unp_scan(m, unp_discard);
}

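/*
 * Walk a queue of messages, applying op (unp_mark or unp_discard) to
 * every file pointer found in SCM_RIGHTS control mbufs.
 */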
unp_scan(m0, op)
	register struct mbuf *m0;
	int (*op)();
{
	register struct mbuf *m;
	register struct file **rp;
	register struct cmsghdr *cm;
	register int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next)
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - sizeof *cm)
						/ sizeof (struct file *);
				rp = (struct file **)(cm + 1);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++);
				break;		/* XXX, but saves time */
			}
		m0 = m0->m_act;
	}
}

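/*
 * Mark routine for the garbage collector: flag a file as reachable and
 * defer a rescan so that rights queued on it are marked as well.
 */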
unp_mark(fp)
	struct file *fp;
{

	if (fp->f_flag & FMARK)
		return;
	unp_defer++;
	fp->f_flag |= (FMARK|FDEFER);
}

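/*
 * Release one in-flight reference to a file carried in a message and
 * close it.
 */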
unp_discard(fp)
	struct file *fp;
{

	fp->f_msgcount--;
	unp_rights--;
	(void) closef(fp, (struct proc *)NULL);
}
798