xref: /dragonfly/sys/kern/uipc_syscalls.c (revision fcf53d9b)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
37  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38  */
39 
40 #include "opt_ktrace.h"
41 #include "opt_sctp.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/sysproto.h>
47 #include <sys/malloc.h>
48 #include <sys/filedesc.h>
49 #include <sys/event.h>
50 #include <sys/proc.h>
51 #include <sys/fcntl.h>
52 #include <sys/file.h>
53 #include <sys/filio.h>
54 #include <sys/kern_syscall.h>
55 #include <sys/mbuf.h>
56 #include <sys/protosw.h>
57 #include <sys/sfbuf.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/socketops.h>
61 #include <sys/uio.h>
62 #include <sys/vnode.h>
63 #include <sys/lock.h>
64 #include <sys/mount.h>
65 #ifdef KTRACE
66 #include <sys/ktrace.h>
67 #endif
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_extern.h>
74 #include <sys/file2.h>
75 #include <sys/signalvar.h>
76 #include <sys/serialize.h>
77 
78 #include <sys/thread2.h>
79 #include <sys/msgport2.h>
80 #include <sys/socketvar2.h>
81 #include <net/netmsg2.h>
82 
83 #ifdef SCTP
84 #include <netinet/sctp_peeloff.h>
85 #endif /* SCTP */
86 
87 /*
88  * System call interface to the socket abstraction.
89  */
90 
91 extern	struct fileops socketops;
92 
93 /*
94  * socket_args(int domain, int type, int protocol)
95  */
96 int
97 kern_socket(int domain, int type, int protocol, int *res)
98 {
99 	struct thread *td = curthread;
100 	struct filedesc *fdp = td->td_proc->p_fd;
101 	struct socket *so;
102 	struct file *fp;
103 	int fd, error;
104 
105 	KKASSERT(td->td_lwp);
106 
107 	error = falloc(td->td_lwp, &fp, &fd);
108 	if (error)
109 		return (error);
110 	error = socreate(domain, &so, type, protocol, td);
111 	if (error) {
112 		fsetfd(fdp, NULL, fd);
113 	} else {
114 		fp->f_type = DTYPE_SOCKET;
115 		fp->f_flag = FREAD | FWRITE;
116 		fp->f_ops = &socketops;
117 		fp->f_data = so;
118 		*res = fd;
119 		fsetfd(fdp, fp, fd);
120 	}
121 	fdrop(fp);
122 	return (error);
123 }
124 
125 /*
126  * MPALMOSTSAFE
127  */
128 int
129 sys_socket(struct socket_args *uap)
130 {
131 	int error;
132 
133 	error = kern_socket(uap->domain, uap->type, uap->protocol,
134 			    &uap->sysmsg_iresult);
135 
136 	return (error);
137 }
138 
139 int
140 kern_bind(int s, struct sockaddr *sa)
141 {
142 	struct thread *td = curthread;
143 	struct proc *p = td->td_proc;
144 	struct file *fp;
145 	int error;
146 
147 	KKASSERT(p);
148 	error = holdsock(p->p_fd, s, &fp);
149 	if (error)
150 		return (error);
151 	error = sobind((struct socket *)fp->f_data, sa, td);
152 	fdrop(fp);
153 	return (error);
154 }
155 
156 /*
157  * bind_args(int s, caddr_t name, int namelen)
158  *
159  * MPALMOSTSAFE
160  */
161 int
162 sys_bind(struct bind_args *uap)
163 {
164 	struct sockaddr *sa;
165 	int error;
166 
167 	error = getsockaddr(&sa, uap->name, uap->namelen);
168 	if (error)
169 		return (error);
170 	error = kern_bind(uap->s, sa);
171 	FREE(sa, M_SONAME);
172 
173 	return (error);
174 }
175 
176 int
177 kern_listen(int s, int backlog)
178 {
179 	struct thread *td = curthread;
180 	struct proc *p = td->td_proc;
181 	struct file *fp;
182 	int error;
183 
184 	KKASSERT(p);
185 	error = holdsock(p->p_fd, s, &fp);
186 	if (error)
187 		return (error);
188 	error = solisten((struct socket *)fp->f_data, backlog, td);
189 	fdrop(fp);
190 	return(error);
191 }
192 
193 /*
194  * listen_args(int s, int backlog)
195  *
196  * MPALMOSTSAFE
197  */
198 int
199 sys_listen(struct listen_args *uap)
200 {
201 	int error;
202 
203 	error = kern_listen(uap->s, uap->backlog);
204 	return (error);
205 }
206 
207 /*
208  * Returns the accepted socket as well.
209  *
210  * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs, the
211  *	  pool token is absolutely required to avoid a sofree() race,
212  *	  as well as to avoid tailq handling races.
213  */
214 static boolean_t
215 soaccept_predicate(struct netmsg_so_notify *msg)
216 {
217 	struct socket *head = msg->base.nm_so;
218 	struct socket *so;
219 
220 	if (head->so_error != 0) {
221 		msg->base.lmsg.ms_error = head->so_error;
222 		return (TRUE);
223 	}
224 	lwkt_getpooltoken(head);
225 	if (!TAILQ_EMPTY(&head->so_comp)) {
226 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
227 		so = TAILQ_FIRST(&head->so_comp);
228 		TAILQ_REMOVE(&head->so_comp, so, so_list);
229 		head->so_qlen--;
230 		soclrstate(so, SS_COMP);
231 		so->so_head = NULL;
232 		soreference(so);
233 
234 		lwkt_relpooltoken(head);
235 
236 		msg->base.lmsg.ms_error = 0;
237 		msg->base.nm_so = so;
238 		return (TRUE);
239 	}
240 	lwkt_relpooltoken(head);
241 	if (head->so_state & SS_CANTRCVMORE) {
242 		msg->base.lmsg.ms_error = ECONNABORTED;
243 		return (TRUE);
244 	}
245 	if (msg->nm_fflags & FNONBLOCK) {
246 		msg->base.lmsg.ms_error = EWOULDBLOCK;
247 		return (TRUE);
248 	}
249 
250 	return (FALSE);
251 }
252 
253 /*
254  * The second argument to kern_accept() is a handle to a struct sockaddr.
255  * This allows kern_accept() to return a pointer to an allocated struct
256  * sockaddr which must be freed later with FREE().  The caller must
257  * initialize *name to NULL.
258  */
259 int
260 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
261 {
262 	struct thread *td = curthread;
263 	struct filedesc *fdp = td->td_proc->p_fd;
264 	struct file *lfp = NULL;
265 	struct file *nfp = NULL;
266 	struct sockaddr *sa;
267 	struct socket *head, *so;
268 	struct netmsg_so_notify msg;
269 	int fd;
270 	u_int fflag;		/* type must match fp->f_flag */
271 	int error, tmp;
272 
273 	*res = -1;
274 	if (name && namelen && *namelen < 0)
275 		return (EINVAL);
276 
277 	error = holdsock(td->td_proc->p_fd, s, &lfp);
278 	if (error)
279 		return (error);
280 
281 	error = falloc(td->td_lwp, &nfp, &fd);
282 	if (error) {		/* Probably ran out of file descriptors. */
283 		fdrop(lfp);
284 		return (error);
285 	}
286 	head = (struct socket *)lfp->f_data;
287 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
288 		error = EINVAL;
289 		goto done;
290 	}
291 
292 	if (fflags & O_FBLOCKING)
293 		fflags |= lfp->f_flag & ~FNONBLOCK;
294 	else if (fflags & O_FNONBLOCKING)
295 		fflags |= lfp->f_flag | FNONBLOCK;
296 	else
297 		fflags = lfp->f_flag;
298 
299 	/* optimize for uniprocessor case later XXX JH */
300 	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
301 			      0, netmsg_so_notify, netmsg_so_notify_doabort);
302 	msg.nm_predicate = soaccept_predicate;
303 	msg.nm_fflags = fflags;
304 	msg.nm_etype = NM_REVENT;
305 	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
306 	if (error)
307 		goto done;
308 
309 	/*
310 	 * At this point we have the connection that's ready to be accepted.
311 	 *
312 	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
313 	 * 	 to eat the ref and turn it into a descriptor.
314 	 */
315 	so = msg.base.nm_so;
316 
317 	fflag = lfp->f_flag;
318 
319 	/* connection has been removed from the listen queue */
320 	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
321 
322 	if (head->so_sigio != NULL)
323 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
324 
325 	nfp->f_type = DTYPE_SOCKET;
326 	nfp->f_flag = fflag;
327 	nfp->f_ops = &socketops;
328 	nfp->f_data = so;
329 	/* Sync socket nonblocking/async state with file flags */
330 	tmp = fflag & FNONBLOCK;
331 	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
332 	tmp = fflag & FASYNC;
333 	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
334 
335 	sa = NULL;
336 	error = soaccept(so, &sa);
337 
338 	/*
339 	 * Set the returned name and namelen as applicable.  Set the returned
340 	 * namelen to 0 for older code which might ignore the return value
341 	 * from accept.
342 	 */
343 	if (error == 0) {
344 		if (sa && name && namelen) {
345 			if (*namelen > sa->sa_len)
346 				*namelen = sa->sa_len;
347 			*name = sa;
348 		} else {
349 			if (sa)
350 				FREE(sa, M_SONAME);
351 		}
352 	}
353 
354 done:
355 	/*
356 	 * If an error occurred, clear the reserved descriptor, else associate
357 	 * nfp with it.
358 	 *
359 	 * Note that *res is normally ignored if an error is returned but
360 	 * a syscall message will still have access to the result code.
361 	 */
362 	if (error) {
363 		fsetfd(fdp, NULL, fd);
364 	} else {
365 		*res = fd;
366 		fsetfd(fdp, nfp, fd);
367 	}
368 	fdrop(nfp);
369 	fdrop(lfp);
370 	return (error);
371 }
372 
373 /*
374  * accept(int s, caddr_t name, int *anamelen)
375  *
376  * MPALMOSTSAFE
377  */
378 int
379 sys_accept(struct accept_args *uap)
380 {
381 	struct sockaddr *sa = NULL;
382 	int sa_len;
383 	int error;
384 
385 	if (uap->name) {
386 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
387 		if (error)
388 			return (error);
389 
390 		error = kern_accept(uap->s, 0, &sa, &sa_len,
391 				    &uap->sysmsg_iresult);
392 
393 		if (error == 0)
394 			error = copyout(sa, uap->name, sa_len);
395 		if (error == 0) {
396 			error = copyout(&sa_len, uap->anamelen,
397 			    sizeof(*uap->anamelen));
398 		}
399 		if (sa)
400 			FREE(sa, M_SONAME);
401 	} else {
402 		error = kern_accept(uap->s, 0, NULL, 0,
403 				    &uap->sysmsg_iresult);
404 	}
405 	return (error);
406 }
407 
408 /*
409  * extaccept(int s, int fflags, caddr_t name, int *anamelen)
410  *
411  * MPALMOSTSAFE
412  */
413 int
414 sys_extaccept(struct extaccept_args *uap)
415 {
416 	struct sockaddr *sa = NULL;
417 	int sa_len;
418 	int error;
419 	int fflags = uap->flags & O_FMASK;
420 
421 	if (uap->name) {
422 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
423 		if (error)
424 			return (error);
425 
426 		error = kern_accept(uap->s, fflags, &sa, &sa_len,
427 				    &uap->sysmsg_iresult);
428 
429 		if (error == 0)
430 			error = copyout(sa, uap->name, sa_len);
431 		if (error == 0) {
432 			error = copyout(&sa_len, uap->anamelen,
433 			    sizeof(*uap->anamelen));
434 		}
435 		if (sa)
436 			FREE(sa, M_SONAME);
437 	} else {
438 		error = kern_accept(uap->s, fflags, NULL, 0,
439 				    &uap->sysmsg_iresult);
440 	}
441 	return (error);
442 }
443 
444 
445 /*
446  * Returns TRUE if predicate satisfied.
447  */
448 static boolean_t
449 soconnected_predicate(struct netmsg_so_notify *msg)
450 {
451 	struct socket *so = msg->base.nm_so;
452 
453 	/* check predicate */
454 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
455 		msg->base.lmsg.ms_error = so->so_error;
456 		return (TRUE);
457 	}
458 
459 	return (FALSE);
460 }
461 
462 int
463 kern_connect(int s, int fflags, struct sockaddr *sa)
464 {
465 	struct thread *td = curthread;
466 	struct proc *p = td->td_proc;
467 	struct file *fp;
468 	struct socket *so;
469 	int error, interrupted = 0;
470 
471 	error = holdsock(p->p_fd, s, &fp);
472 	if (error)
473 		return (error);
474 	so = (struct socket *)fp->f_data;
475 
476 	if (fflags & O_FBLOCKING)
477 		/* fflags &= ~FNONBLOCK; */;
478 	else if (fflags & O_FNONBLOCKING)
479 		fflags |= FNONBLOCK;
480 	else
481 		fflags = fp->f_flag;
482 
483 	if (so->so_state & SS_ISCONNECTING) {
484 		error = EALREADY;
485 		goto done;
486 	}
487 	error = soconnect(so, sa, td);
488 	if (error)
489 		goto bad;
490 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
491 		error = EINPROGRESS;
492 		goto done;
493 	}
494 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
495 		struct netmsg_so_notify msg;
496 
497 		netmsg_init_abortable(&msg.base, so,
498 				      &curthread->td_msgport,
499 				      0,
500 				      netmsg_so_notify,
501 				      netmsg_so_notify_doabort);
502 		msg.nm_predicate = soconnected_predicate;
503 		msg.nm_etype = NM_REVENT;
504 		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
505 		if (error == EINTR || error == ERESTART)
506 			interrupted = 1;
507 	}
508 	if (error == 0) {
509 		error = so->so_error;
510 		so->so_error = 0;
511 	}
512 bad:
513 	if (!interrupted)
514 		soclrstate(so, SS_ISCONNECTING);
515 	if (error == ERESTART)
516 		error = EINTR;
517 done:
518 	fdrop(fp);
519 	return (error);
520 }
521 
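/*
 * Illustrative sketch, not part of the original file: a userland
 * caller exercising the non-blocking path in kern_connect() above.
 * On a non-blocking socket, connect(2) returns EINPROGRESS while the
 * connection is still being established; the caller then waits for
 * writability and fetches the final status with SO_ERROR.  The filled
 * in sockaddr 'sin' is assumed and most error checks are omitted.
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);
 *	int soerr;
 *	socklen_t slen = sizeof(soerr);
 *	struct pollfd pfd;
 *
 *	fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK);
 *	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		pfd.fd = s;
 *		pfd.events = POLLOUT;
 *		poll(&pfd, 1, -1);
 *		getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &slen);
 *	}
 */
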
522 /*
523  * connect_args(int s, caddr_t name, int namelen)
524  *
525  * MPALMOSTSAFE
526  */
527 int
528 sys_connect(struct connect_args *uap)
529 {
530 	struct sockaddr *sa;
531 	int error;
532 
533 	error = getsockaddr(&sa, uap->name, uap->namelen);
534 	if (error)
535 		return (error);
536 	error = kern_connect(uap->s, 0, sa);
537 	FREE(sa, M_SONAME);
538 
539 	return (error);
540 }
541 
542 /*
543  * extconnect_args(int s, int fflags, caddr_t name, int namelen)
544  *
545  * MPALMOSTSAFE
546  */
547 int
548 sys_extconnect(struct extconnect_args *uap)
549 {
550 	struct sockaddr *sa;
551 	int error;
552 	int fflags = uap->flags & O_FMASK;
553 
554 	error = getsockaddr(&sa, uap->name, uap->namelen);
555 	if (error)
556 		return (error);
557 	error = kern_connect(uap->s, fflags, sa);
558 	FREE(sa, M_SONAME);
559 
560 	return (error);
561 }
562 
563 int
564 kern_socketpair(int domain, int type, int protocol, int *sv)
565 {
566 	struct thread *td = curthread;
567 	struct filedesc *fdp;
568 	struct file *fp1, *fp2;
569 	struct socket *so1, *so2;
570 	int fd1, fd2, error;
571 
572 	fdp = td->td_proc->p_fd;
573 	error = socreate(domain, &so1, type, protocol, td);
574 	if (error)
575 		return (error);
576 	error = socreate(domain, &so2, type, protocol, td);
577 	if (error)
578 		goto free1;
579 	error = falloc(td->td_lwp, &fp1, &fd1);
580 	if (error)
581 		goto free2;
582 	sv[0] = fd1;
583 	fp1->f_data = so1;
584 	error = falloc(td->td_lwp, &fp2, &fd2);
585 	if (error)
586 		goto free3;
587 	fp2->f_data = so2;
588 	sv[1] = fd2;
589 	error = soconnect2(so1, so2);
590 	if (error)
591 		goto free4;
592 	if (type == SOCK_DGRAM) {
593 		/*
594 		 * Datagram socket connection is asymmetric.
595 		 */
596 		 error = soconnect2(so2, so1);
597 		 if (error)
598 			goto free4;
599 	}
600 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
601 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
602 	fp1->f_ops = fp2->f_ops = &socketops;
603 	fsetfd(fdp, fp1, fd1);
604 	fsetfd(fdp, fp2, fd2);
605 	fdrop(fp1);
606 	fdrop(fp2);
607 	return (error);
608 free4:
609 	fsetfd(fdp, NULL, fd2);
610 	fdrop(fp2);
611 free3:
612 	fsetfd(fdp, NULL, fd1);
613 	fdrop(fp1);
614 free2:
615 	(void)soclose(so2, 0);
616 free1:
617 	(void)soclose(so1, 0);
618 	return (error);
619 }
620 
621 /*
622  * socketpair(int domain, int type, int protocol, int *rsv)
623  *
624  * MPALMOSTSAFE
625  */
626 int
627 sys_socketpair(struct socketpair_args *uap)
628 {
629 	int error, sockv[2];
630 
631 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
632 
633 	if (error == 0)
634 		error = copyout(sockv, uap->rsv, sizeof(sockv));
635 	return (error);
636 }
637 
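/*
 * Illustrative sketch, not part of the original file: typical userland
 * use of socketpair(2), which is implemented by kern_socketpair()
 * above.  A connected pair of AF_UNIX stream sockets is created and
 * one end is used by a forked child.
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
 *		err(1, "socketpair");
 *	if (fork() == 0) {
 *		close(sv[0]);
 *		write(sv[1], "hello", 5);
 *		_exit(0);
 *	}
 *	close(sv[1]);
 */
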
638 int
639 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
640 	     struct mbuf *control, int flags, size_t *res)
641 {
642 	struct thread *td = curthread;
643 	struct lwp *lp = td->td_lwp;
644 	struct proc *p = td->td_proc;
645 	struct file *fp;
646 	size_t len;
647 	int error;
648 	struct socket *so;
649 #ifdef KTRACE
650 	struct iovec *ktriov = NULL;
651 	struct uio ktruio;
652 #endif
653 
654 	error = holdsock(p->p_fd, s, &fp);
655 	if (error)
656 		return (error);
657 #ifdef KTRACE
658 	if (KTRPOINT(td, KTR_GENIO)) {
659 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
660 
661 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
662 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
663 		ktruio = *auio;
664 	}
665 #endif
666 	len = auio->uio_resid;
667 	so = (struct socket *)fp->f_data;
668 	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
669 		if (fp->f_flag & FNONBLOCK)
670 			flags |= MSG_FNONBLOCKING;
671 	}
672 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
673 	if (error) {
674 		if (auio->uio_resid != len && (error == ERESTART ||
675 		    error == EINTR || error == EWOULDBLOCK))
676 			error = 0;
677 		if (error == EPIPE && !(flags & MSG_NOSIGNAL))
678 			lwpsignal(p, lp, SIGPIPE);
679 	}
680 #ifdef KTRACE
681 	if (ktriov != NULL) {
682 		if (error == 0) {
683 			ktruio.uio_iov = ktriov;
684 			ktruio.uio_resid = len - auio->uio_resid;
685 			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
686 		}
687 		FREE(ktriov, M_TEMP);
688 	}
689 #endif
690 	if (error == 0)
691 		*res  = len - auio->uio_resid;
692 	fdrop(fp);
693 	return (error);
694 }
695 
696 /*
697  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
698  *
699  * MPALMOSTSAFE
700  */
701 int
702 sys_sendto(struct sendto_args *uap)
703 {
704 	struct thread *td = curthread;
705 	struct uio auio;
706 	struct iovec aiov;
707 	struct sockaddr *sa = NULL;
708 	int error;
709 
710 	if (uap->to) {
711 		error = getsockaddr(&sa, uap->to, uap->tolen);
712 		if (error)
713 			return (error);
714 	}
715 	aiov.iov_base = uap->buf;
716 	aiov.iov_len = uap->len;
717 	auio.uio_iov = &aiov;
718 	auio.uio_iovcnt = 1;
719 	auio.uio_offset = 0;
720 	auio.uio_resid = uap->len;
721 	auio.uio_segflg = UIO_USERSPACE;
722 	auio.uio_rw = UIO_WRITE;
723 	auio.uio_td = td;
724 
725 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
726 			     &uap->sysmsg_szresult);
727 
728 	if (sa)
729 		FREE(sa, M_SONAME);
730 	return (error);
731 }
732 
733 /*
734  * sendmsg_args(int s, caddr_t msg, int flags)
735  *
736  * MPALMOSTSAFE
737  */
738 int
739 sys_sendmsg(struct sendmsg_args *uap)
740 {
741 	struct thread *td = curthread;
742 	struct msghdr msg;
743 	struct uio auio;
744 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
745 	struct sockaddr *sa = NULL;
746 	struct mbuf *control = NULL;
747 	int error;
748 
749 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
750 	if (error)
751 		return (error);
752 
753 	/*
754 	 * Conditionally copyin msg.msg_name.
755 	 */
756 	if (msg.msg_name) {
757 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
758 		if (error)
759 			return (error);
760 	}
761 
762 	/*
763 	 * Populate auio.
764 	 */
765 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
766 			     &auio.uio_resid);
767 	if (error)
768 		goto cleanup2;
769 	auio.uio_iov = iov;
770 	auio.uio_iovcnt = msg.msg_iovlen;
771 	auio.uio_offset = 0;
772 	auio.uio_segflg = UIO_USERSPACE;
773 	auio.uio_rw = UIO_WRITE;
774 	auio.uio_td = td;
775 
776 	/*
777 	 * Conditionally copyin msg.msg_control.
778 	 */
779 	if (msg.msg_control) {
780 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
781 		    msg.msg_controllen > MLEN) {
782 			error = EINVAL;
783 			goto cleanup;
784 		}
785 		control = m_get(MB_WAIT, MT_CONTROL);
786 		if (control == NULL) {
787 			error = ENOBUFS;
788 			goto cleanup;
789 		}
790 		control->m_len = msg.msg_controllen;
791 		error = copyin(msg.msg_control, mtod(control, caddr_t),
792 			       msg.msg_controllen);
793 		if (error) {
794 			m_free(control);
795 			goto cleanup;
796 		}
797 	}
798 
799 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
800 			     &uap->sysmsg_szresult);
801 
802 cleanup:
803 	iovec_free(&iov, aiov);
804 cleanup2:
805 	if (sa)
806 		FREE(sa, M_SONAME);
807 	return (error);
808 }
809 
810 /*
811  * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
812  * null, it returns a dynamically allocated struct sockaddr and an mbuf.
813  * Don't forget to FREE() and m_free() these if they are returned.
814  */
815 int
816 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
817 	     struct mbuf **control, int *flags, size_t *res)
818 {
819 	struct thread *td = curthread;
820 	struct proc *p = td->td_proc;
821 	struct file *fp;
822 	size_t len;
823 	int error;
824 	int lflags;
825 	struct socket *so;
826 #ifdef KTRACE
827 	struct iovec *ktriov = NULL;
828 	struct uio ktruio;
829 #endif
830 
831 	error = holdsock(p->p_fd, s, &fp);
832 	if (error)
833 		return (error);
834 #ifdef KTRACE
835 	if (KTRPOINT(td, KTR_GENIO)) {
836 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
837 
838 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
839 		bcopy(auio->uio_iov, ktriov, iovlen);
840 		ktruio = *auio;
841 	}
842 #endif
843 	len = auio->uio_resid;
844 	so = (struct socket *)fp->f_data;
845 
846 	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
847 		if (fp->f_flag & FNONBLOCK) {
848 			if (flags) {
849 				*flags |= MSG_FNONBLOCKING;
850 			} else {
851 				lflags = MSG_FNONBLOCKING;
852 				flags = &lflags;
853 			}
854 		}
855 	}
856 
857 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
858 	if (error) {
859 		if (auio->uio_resid != len && (error == ERESTART ||
860 		    error == EINTR || error == EWOULDBLOCK))
861 			error = 0;
862 	}
863 #ifdef KTRACE
864 	if (ktriov != NULL) {
865 		if (error == 0) {
866 			ktruio.uio_iov = ktriov;
867 			ktruio.uio_resid = len - auio->uio_resid;
868 			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
869 		}
870 		FREE(ktriov, M_TEMP);
871 	}
872 #endif
873 	if (error == 0)
874 		*res = len - auio->uio_resid;
875 	fdrop(fp);
876 	return (error);
877 }
878 
879 /*
880  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
881  *			caddr_t from, int *fromlenaddr)
882  *
883  * MPALMOSTSAFE
884  */
885 int
886 sys_recvfrom(struct recvfrom_args *uap)
887 {
888 	struct thread *td = curthread;
889 	struct uio auio;
890 	struct iovec aiov;
891 	struct sockaddr *sa = NULL;
892 	int error, fromlen;
893 
894 	if (uap->from && uap->fromlenaddr) {
895 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
896 		if (error)
897 			return (error);
898 		if (fromlen < 0)
899 			return (EINVAL);
900 	} else {
901 		fromlen = 0;
902 	}
903 	aiov.iov_base = uap->buf;
904 	aiov.iov_len = uap->len;
905 	auio.uio_iov = &aiov;
906 	auio.uio_iovcnt = 1;
907 	auio.uio_offset = 0;
908 	auio.uio_resid = uap->len;
909 	auio.uio_segflg = UIO_USERSPACE;
910 	auio.uio_rw = UIO_READ;
911 	auio.uio_td = td;
912 
913 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
914 			     &uap->flags, &uap->sysmsg_szresult);
915 
916 	if (error == 0 && uap->from) {
917 		/* note: sa may still be NULL */
918 		if (sa) {
919 			fromlen = MIN(fromlen, sa->sa_len);
920 			error = copyout(sa, uap->from, fromlen);
921 		} else {
922 			fromlen = 0;
923 		}
924 		if (error == 0) {
925 			error = copyout(&fromlen, uap->fromlenaddr,
926 					sizeof(fromlen));
927 		}
928 	}
929 	if (sa)
930 		FREE(sa, M_SONAME);
931 
932 	return (error);
933 }
934 
935 /*
936  * recvmsg_args(int s, struct msghdr *msg, int flags)
937  *
938  * MPALMOSTSAFE
939  */
940 int
941 sys_recvmsg(struct recvmsg_args *uap)
942 {
943 	struct thread *td = curthread;
944 	struct msghdr msg;
945 	struct uio auio;
946 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
947 	struct mbuf *m, *control = NULL;
948 	struct sockaddr *sa = NULL;
949 	caddr_t ctlbuf;
950 	socklen_t *ufromlenp, *ucontrollenp;
951 	int error, fromlen, controllen, len, flags, *uflagsp;
952 
953 	/*
954 	 * This copyin handles everything except the iovec.
955 	 */
956 	error = copyin(uap->msg, &msg, sizeof(msg));
957 	if (error)
958 		return (error);
959 
960 	if (msg.msg_name && msg.msg_namelen < 0)
961 		return (EINVAL);
962 	if (msg.msg_control && msg.msg_controllen < 0)
963 		return (EINVAL);
964 
965 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
966 		    msg_namelen));
967 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
968 		       msg_controllen));
969 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
970 							msg_flags));
971 
972 	/*
973 	 * Populate auio.
974 	 */
975 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
976 			     &auio.uio_resid);
977 	if (error)
978 		return (error);
979 	auio.uio_iov = iov;
980 	auio.uio_iovcnt = msg.msg_iovlen;
981 	auio.uio_offset = 0;
982 	auio.uio_segflg = UIO_USERSPACE;
983 	auio.uio_rw = UIO_READ;
984 	auio.uio_td = td;
985 
986 	flags = uap->flags;
987 
988 	error = kern_recvmsg(uap->s,
989 			     (msg.msg_name ? &sa : NULL), &auio,
990 			     (msg.msg_control ? &control : NULL), &flags,
991 			     &uap->sysmsg_szresult);
992 
993 	/*
994 	 * Conditionally copyout the name and populate the namelen field.
995 	 */
996 	if (error == 0 && msg.msg_name) {
997 		/* note: sa may still be NULL */
998 		if (sa != NULL) {
999 			fromlen = MIN(msg.msg_namelen, sa->sa_len);
1000 			error = copyout(sa, msg.msg_name, fromlen);
1001 		} else {
1002 			fromlen = 0;
1003 		}
1004 		if (error == 0)
1005 			error = copyout(&fromlen, ufromlenp,
1006 			    sizeof(*ufromlenp));
1007 	}
1008 
1009 	/*
1010 	 * Copyout msg.msg_control and msg.msg_controllen.
1011 	 */
1012 	if (error == 0 && msg.msg_control) {
1013 		len = msg.msg_controllen;
1014 		m = control;
1015 		ctlbuf = (caddr_t)msg.msg_control;
1016 
1017 		while(m && len > 0) {
1018 			unsigned int tocopy;
1019 
1020 			if (len >= m->m_len) {
1021 				tocopy = m->m_len;
1022 			} else {
1023 				msg.msg_flags |= MSG_CTRUNC;
1024 				tocopy = len;
1025 			}
1026 
1027 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1028 			if (error)
1029 				goto cleanup;
1030 
1031 			ctlbuf += tocopy;
1032 			len -= tocopy;
1033 			m = m->m_next;
1034 		}
1035 		controllen = ctlbuf - (caddr_t)msg.msg_control;
1036 		error = copyout(&controllen, ucontrollenp,
1037 		    sizeof(*ucontrollenp));
1038 	}
1039 
1040 	if (error == 0)
1041 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1042 
1043 cleanup:
1044 	if (sa)
1045 		FREE(sa, M_SONAME);
1046 	iovec_free(&iov, aiov);
1047 	if (control)
1048 		m_freem(control);
1049 	return (error);
1050 }
1051 
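/*
 * Illustrative sketch, not part of the original file: how a userland
 * consumer typically walks the control data that sys_recvmsg() copies
 * out above, checking MSG_CTRUNC for a truncated control buffer.  The
 * CMSG_* macros are the standard <sys/socket.h> ones; 's' is assumed
 * to be a connected AF_UNIX socket and SCM_RIGHTS is just an example
 * payload.
 *
 *	struct msghdr msg;
 *	struct iovec iov;
 *	struct cmsghdr *cm;
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	char byte;
 *	int fd;
 *
 *	iov.iov_base = &byte;
 *	iov.iov_len = 1;
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(s, &msg, 0) >= 0 && !(msg.msg_flags & MSG_CTRUNC)) {
 *		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *		     cm = CMSG_NXTHDR(&msg, cm)) {
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_RIGHTS)
 *				fd = *(int *)CMSG_DATA(cm);
 *		}
 *	}
 */
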
1052 /*
1053  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1054  * in kernel pointer instead of a userland pointer.  This allows us
1055  * to manipulate socket options in the emulation code.
1056  */
1057 int
1058 kern_setsockopt(int s, struct sockopt *sopt)
1059 {
1060 	struct thread *td = curthread;
1061 	struct proc *p = td->td_proc;
1062 	struct file *fp;
1063 	int error;
1064 
1065 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1066 		return (EFAULT);
1067 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1068 		return (EINVAL);
1069 	if (sopt->sopt_valsize < 0)
1070 		return (EINVAL);
1071 
1072 	error = holdsock(p->p_fd, s, &fp);
1073 	if (error)
1074 		return (error);
1075 
1076 	error = sosetopt((struct socket *)fp->f_data, sopt);
1077 	fdrop(fp);
1078 	return (error);
1079 }
1080 
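/*
 * Illustrative sketch, not part of the original file: the in-kernel
 * usage described above.  With sopt_td left NULL, sopt_val is treated
 * as a kernel pointer, so emulation or other kernel code can set an
 * option without a userland buffer.  The socket descriptor 's' is
 * assumed; the fields mirror those filled in by sys_setsockopt().
 *
 *	struct sockopt sopt;
 *	int on = 1;
 *	int error;
 *
 *	bzero(&sopt, sizeof(sopt));
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &on;
 *	sopt.sopt_valsize = sizeof(on);
 *	sopt.sopt_td = NULL;
 *	error = kern_setsockopt(s, &sopt);
 */
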
1081 /*
1082  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1083  *
1084  * MPALMOSTSAFE
1085  */
1086 int
1087 sys_setsockopt(struct setsockopt_args *uap)
1088 {
1089 	struct thread *td = curthread;
1090 	struct sockopt sopt;
1091 	int error;
1092 
1093 	sopt.sopt_level = uap->level;
1094 	sopt.sopt_name = uap->name;
1095 	sopt.sopt_valsize = uap->valsize;
1096 	sopt.sopt_td = td;
1097 	sopt.sopt_val = NULL;
1098 
1099 	if (sopt.sopt_valsize < 0 || sopt.sopt_valsize > SOMAXOPT_SIZE)
1100 		return (EINVAL);
1101 	if (uap->val) {
1102 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1103 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1104 		if (error)
1105 			goto out;
1106 	}
1107 
1108 	error = kern_setsockopt(uap->s, &sopt);
1109 out:
1110 	if (uap->val)
1111 		kfree(sopt.sopt_val, M_TEMP);
1112 	return(error);
1113 }
1114 
1115 /*
1116  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1117  * in kernel pointer instead of a userland pointer.  This allows us
1118  * to manipulate socket options in the emulation code.
1119  */
1120 int
1121 kern_getsockopt(int s, struct sockopt *sopt)
1122 {
1123 	struct thread *td = curthread;
1124 	struct proc *p = td->td_proc;
1125 	struct file *fp;
1126 	int error;
1127 
1128 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1129 		return (EFAULT);
1130 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1131 		return (EINVAL);
1132 	if (sopt->sopt_valsize < 0 || sopt->sopt_valsize > SOMAXOPT_SIZE)
1133 		return (EINVAL);
1134 
1135 	error = holdsock(p->p_fd, s, &fp);
1136 	if (error)
1137 		return (error);
1138 
1139 	error = sogetopt((struct socket *)fp->f_data, sopt);
1140 	fdrop(fp);
1141 	return (error);
1142 }
1143 
1144 /*
1145  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1146  *
1147  * MPALMOSTSAFE
1148  */
1149 int
1150 sys_getsockopt(struct getsockopt_args *uap)
1151 {
1152 	struct thread *td = curthread;
1153 	struct	sockopt sopt;
1154 	int	error, valsize;
1155 
1156 	if (uap->val) {
1157 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1158 		if (error)
1159 			return (error);
1160 	} else {
1161 		valsize = 0;
1162 	}
1163 
1164 	sopt.sopt_level = uap->level;
1165 	sopt.sopt_name = uap->name;
1166 	sopt.sopt_valsize = valsize;
1167 	sopt.sopt_td = td;
1168 	sopt.sopt_val = NULL;
1169 
1170 	if (sopt.sopt_valsize < 0 || sopt.sopt_valsize > SOMAXOPT_SIZE)
1171 		return (EINVAL);
1172 	if (uap->val) {
1173 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1174 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1175 		if (error)
1176 			goto out;
1177 	}
1178 
1179 	error = kern_getsockopt(uap->s, &sopt);
1180 	if (error)
1181 		goto out;
1182 	valsize = sopt.sopt_valsize;
1183 	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1184 	if (error)
1185 		goto out;
1186 	if (uap->val)
1187 		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1188 out:
1189 	if (uap->val)
1190 		kfree(sopt.sopt_val, M_TEMP);
1191 	return (error);
1192 }
1193 
1194 /*
1195  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1196  * This allows kern_getsockname() to return a pointer to an allocated struct
1197  * sockaddr which must be freed later with FREE().  The caller must
1198  * initialize *name to NULL.
1199  */
1200 int
1201 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1202 {
1203 	struct thread *td = curthread;
1204 	struct proc *p = td->td_proc;
1205 	struct file *fp;
1206 	struct socket *so;
1207 	struct sockaddr *sa = NULL;
1208 	int error;
1209 
1210 	error = holdsock(p->p_fd, s, &fp);
1211 	if (error)
1212 		return (error);
1213 	if (*namelen < 0) {
1214 		fdrop(fp);
1215 		return (EINVAL);
1216 	}
1217 	so = (struct socket *)fp->f_data;
1218 	error = so_pru_sockaddr(so, &sa);
1219 	if (error == 0) {
1220 		if (sa == NULL) {
1221 			*namelen = 0;
1222 		} else {
1223 			*namelen = MIN(*namelen, sa->sa_len);
1224 			*name = sa;
1225 		}
1226 	}
1227 
1228 	fdrop(fp);
1229 	return (error);
1230 }
1231 
1232 /*
1233  * getsockname_args(int fdes, caddr_t asa, int *alen)
1234  *
1235  * Get socket name.
1236  *
1237  * MPALMOSTSAFE
1238  */
1239 int
1240 sys_getsockname(struct getsockname_args *uap)
1241 {
1242 	struct sockaddr *sa = NULL;
1243 	int error, sa_len;
1244 
1245 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1246 	if (error)
1247 		return (error);
1248 
1249 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1250 
1251 	if (error == 0)
1252 		error = copyout(sa, uap->asa, sa_len);
1253 	if (error == 0)
1254 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1255 	if (sa)
1256 		FREE(sa, M_SONAME);
1257 	return (error);
1258 }
1259 
1260 /*
1261  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1262  * This allows kern_getpeername() to return a pointer to an allocated struct
1263  * sockaddr which must be freed later with FREE().  The caller must
1264  * initialize *name to NULL.
1265  */
1266 int
1267 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1268 {
1269 	struct thread *td = curthread;
1270 	struct proc *p = td->td_proc;
1271 	struct file *fp;
1272 	struct socket *so;
1273 	struct sockaddr *sa = NULL;
1274 	int error;
1275 
1276 	error = holdsock(p->p_fd, s, &fp);
1277 	if (error)
1278 		return (error);
1279 	if (*namelen < 0) {
1280 		fdrop(fp);
1281 		return (EINVAL);
1282 	}
1283 	so = (struct socket *)fp->f_data;
1284 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1285 		fdrop(fp);
1286 		return (ENOTCONN);
1287 	}
1288 	error = so_pru_peeraddr(so, &sa);
1289 	if (error == 0) {
1290 		if (sa == NULL) {
1291 			*namelen = 0;
1292 		} else {
1293 			*namelen = MIN(*namelen, sa->sa_len);
1294 			*name = sa;
1295 		}
1296 	}
1297 
1298 	fdrop(fp);
1299 	return (error);
1300 }
1301 
1302 /*
1303  * getpeername_args(int fdes, caddr_t asa, int *alen)
1304  *
1305  * Get name of peer for connected socket.
1306  *
1307  * MPALMOSTSAFE
1308  */
1309 int
1310 sys_getpeername(struct getpeername_args *uap)
1311 {
1312 	struct sockaddr *sa = NULL;
1313 	int error, sa_len;
1314 
1315 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1316 	if (error)
1317 		return (error);
1318 
1319 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1320 
1321 	if (error == 0)
1322 		error = copyout(sa, uap->asa, sa_len);
1323 	if (error == 0)
1324 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1325 	if (sa)
1326 		FREE(sa, M_SONAME);
1327 	return (error);
1328 }
1329 
1330 int
1331 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1332 {
1333 	struct sockaddr *sa;
1334 	int error;
1335 
1336 	*namp = NULL;
1337 	if (len > SOCK_MAXADDRLEN)
1338 		return ENAMETOOLONG;
1339 	if (len < offsetof(struct sockaddr, sa_data[0]))
1340 		return EDOM;
1341 	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1342 	error = copyin(uaddr, sa, len);
1343 	if (error) {
1344 		FREE(sa, M_SONAME);
1345 	} else {
1346 #if BYTE_ORDER != BIG_ENDIAN
1347 		/*
1348 		 * The bind(), connect(), and sendto() syscalls were not
1349 		 * versioned for COMPAT_43.  Thus, this check must stay.
1350 		 */
1351 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1352 			sa->sa_family = sa->sa_len;
1353 #endif
1354 		sa->sa_len = len;
1355 		*namp = sa;
1356 	}
1357 	return error;
1358 }
1359 
1360 /*
1361  * Detach a mapped page and release resources back to the system.
1362  * We must release our wiring and if the object is ripped out
1363  * from under the vm_page we become responsible for freeing the
1364  * page.
1365  *
1366  * MPSAFE
1367  */
1368 static void
1369 sf_buf_mfree(void *arg)
1370 {
1371 	struct sf_buf *sf = arg;
1372 	vm_page_t m;
1373 
1374 	m = sf_buf_page(sf);
1375 	if (sf_buf_free(sf)) {
1376 		/* sf invalid now */
1377 		vm_page_unwire(m, 0);
1378 		if (m->wire_count == 0 && m->object == NULL)
1379 			vm_page_try_to_free(m);
1380 	}
1381 }
1382 
1383 /*
1384  * sendfile(2).
1385  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1386  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1387  *
1388  * Send a file specified by 'fd' and starting at 'offset' to a socket
1389  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1390  * nbytes == 0. Optionally add a header and/or trailer to the socket
1391  * output. If specified, write the total number of bytes sent into *sbytes.
1392  *
1393  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1394  * the headers to count against the remaining bytes to be sent from
1395  * the file descriptor.  We may wish to implement a compatibility syscall
1396  * in the future.
1397  *
1398  * MPALMOSTSAFE
1399  */
1400 int
1401 sys_sendfile(struct sendfile_args *uap)
1402 {
1403 	struct thread *td = curthread;
1404 	struct proc *p = td->td_proc;
1405 	struct file *fp;
1406 	struct vnode *vp = NULL;
1407 	struct sf_hdtr hdtr;
1408 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1409 	struct uio auio;
1410 	struct mbuf *mheader = NULL;
1411 	size_t hbytes = 0;
1412 	size_t tbytes;
1413 	off_t hdtr_size = 0;
1414 	off_t sbytes;
1415 	int error;
1416 
1417 	KKASSERT(p);
1418 
1419 	/*
1420 	 * Do argument checking. Must be a regular file in, stream
1421 	 * type and connected socket out, positive offset.
1422 	 */
1423 	fp = holdfp(p->p_fd, uap->fd, FREAD);
1424 	if (fp == NULL) {
1425 		return (EBADF);
1426 	}
1427 	if (fp->f_type != DTYPE_VNODE) {
1428 		fdrop(fp);
1429 		return (EINVAL);
1430 	}
1431 	vp = (struct vnode *)fp->f_data;
1432 	vref(vp);
1433 	fdrop(fp);
1434 
1435 	/*
1436 	 * If specified, get the pointer to the sf_hdtr struct for
1437 	 * any headers/trailers.
1438 	 */
1439 	if (uap->hdtr) {
1440 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1441 		if (error)
1442 			goto done;
1443 		/*
1444 		 * Send any headers.
1445 		 */
1446 		if (hdtr.headers) {
1447 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1448 					     hdtr.hdr_cnt, &hbytes);
1449 			if (error)
1450 				goto done;
1451 			auio.uio_iov = iov;
1452 			auio.uio_iovcnt = hdtr.hdr_cnt;
1453 			auio.uio_offset = 0;
1454 			auio.uio_segflg = UIO_USERSPACE;
1455 			auio.uio_rw = UIO_WRITE;
1456 			auio.uio_td = td;
1457 			auio.uio_resid = hbytes;
1458 
1459 			mheader = m_uiomove(&auio);
1460 
1461 			iovec_free(&iov, aiov);
1462 			if (mheader == NULL)
1463 				goto done;
1464 		}
1465 	}
1466 
1467 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1468 			      &sbytes, uap->flags);
1469 	if (error)
1470 		goto done;
1471 
1472 	/*
1473 	 * Send trailers. Wimp out and use writev(2).
1474 	 */
1475 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1476 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1477 				     hdtr.trl_cnt, &auio.uio_resid);
1478 		if (error)
1479 			goto done;
1480 		auio.uio_iov = iov;
1481 		auio.uio_iovcnt = hdtr.trl_cnt;
1482 		auio.uio_offset = 0;
1483 		auio.uio_segflg = UIO_USERSPACE;
1484 		auio.uio_rw = UIO_WRITE;
1485 		auio.uio_td = td;
1486 
1487 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1488 
1489 		iovec_free(&iov, aiov);
1490 		if (error)
1491 			goto done;
1492 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1493 	}
1494 
1495 done:
1496 	if (vp)
1497 		vrele(vp);
1498 	if (uap->sbytes != NULL) {
1499 		sbytes += hdtr_size;
1500 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1501 	}
1502 	return (error);
1503 }
1504 
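/*
 * Illustrative sketch, not part of the original file: a userland
 * caller of the sendfile(2) syscall implemented above, prepending a
 * header with struct sf_hdtr and sending the whole file (nbytes == 0
 * means until EOF).  The connected stream socket 's' and the path
 * "/some/file" are assumed; error checks are minimal.
 *
 *	struct sf_hdtr hdtr;
 *	struct iovec hdr;
 *	off_t sbytes;
 *	int fd = open("/some/file", O_RDONLY);
 *
 *	hdr.iov_base = "HTTP/1.0 200 OK\r\n\r\n";
 *	hdr.iov_len = 19;
 *	hdtr.headers = &hdr;
 *	hdtr.hdr_cnt = 1;
 *	hdtr.trailers = NULL;
 *	hdtr.trl_cnt = 0;
 *	if (sendfile(fd, s, 0, 0, &hdtr, &sbytes, 0) < 0)
 *		err(1, "sendfile");
 */
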
1505 int
1506 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1507 	      struct mbuf *mheader, off_t *sbytes, int flags)
1508 {
1509 	struct thread *td = curthread;
1510 	struct proc *p = td->td_proc;
1511 	struct vm_object *obj;
1512 	struct socket *so;
1513 	struct file *fp;
1514 	struct mbuf *m;
1515 	struct sf_buf *sf;
1516 	struct vm_page *pg;
1517 	off_t off, xfsize;
1518 	off_t hbytes = 0;
1519 	int error = 0;
1520 
1521 	if (vp->v_type != VREG) {
1522 		error = EINVAL;
1523 		goto done0;
1524 	}
1525 	if ((obj = vp->v_object) == NULL) {
1526 		error = EINVAL;
1527 		goto done0;
1528 	}
1529 	error = holdsock(p->p_fd, sfd, &fp);
1530 	if (error)
1531 		goto done0;
1532 	so = (struct socket *)fp->f_data;
1533 	if (so->so_type != SOCK_STREAM) {
1534 		error = EINVAL;
1535 		goto done;
1536 	}
1537 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1538 		error = ENOTCONN;
1539 		goto done;
1540 	}
1541 	if (offset < 0) {
1542 		error = EINVAL;
1543 		goto done;
1544 	}
1545 
1546 	*sbytes = 0;
1547 	/*
1548 	 * Protect against multiple writers to the socket.
1549 	 */
1550 	ssb_lock(&so->so_snd, M_WAITOK);
1551 
1552 	/*
1553 	 * Loop through the pages in the file, starting with the requested
1554 	 * offset. Get a file page (do I/O if necessary), map the file page
1555 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1556 	 * it on the socket.
1557 	 */
1558 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
1559 		vm_pindex_t pindex;
1560 		vm_offset_t pgoff;
1561 
1562 		pindex = OFF_TO_IDX(off);
1563 retry_lookup:
1564 		/*
1565 		 * Calculate the amount to transfer. Not to exceed a page,
1566 		 * the EOF, or the passed in nbytes.
1567 		 */
1568 		xfsize = vp->v_filesize - off;
1569 		if (xfsize > PAGE_SIZE)
1570 			xfsize = PAGE_SIZE;
1571 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1572 		if (PAGE_SIZE - pgoff < xfsize)
1573 			xfsize = PAGE_SIZE - pgoff;
1574 		if (nbytes && xfsize > (nbytes - *sbytes))
1575 			xfsize = nbytes - *sbytes;
1576 		if (xfsize <= 0)
1577 			break;
1578 		/*
1579 		 * Optimize the non-blocking case by looking at the socket space
1580 		 * before going to the extra work of constituting the sf_buf.
1581 		 */
1582 		if ((fp->f_flag & FNONBLOCK) && ssb_space(&so->so_snd) <= 0) {
1583 			if (so->so_state & SS_CANTSENDMORE)
1584 				error = EPIPE;
1585 			else
1586 				error = EAGAIN;
1587 			ssb_unlock(&so->so_snd);
1588 			goto done;
1589 		}
1590 		/*
1591 		 * Attempt to look up the page.
1592 		 *
1593 		 *	Allocate if not found, wait and loop if busy, then
1594 		 *	wire the page.  critical section protection is
1595 		 * 	required to maintain the object association (an
1596 		 *	interrupt can free the page) through to the
1597 		 *	vm_page_wire() call.
1598 		 */
1599 		lwkt_gettoken(&vm_token);
1600 		pg = vm_page_lookup(obj, pindex);
1601 		if (pg == NULL) {
1602 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
1603 			if (pg == NULL) {
1604 				vm_wait(0);
1605 				lwkt_reltoken(&vm_token);
1606 				goto retry_lookup;
1607 			}
1608 			vm_page_wire(pg);
1609 			vm_page_wakeup(pg);
1610 		} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
1611 			lwkt_reltoken(&vm_token);
1612 			goto retry_lookup;
1613 		} else {
1614 			vm_page_wire(pg);
1615 		}
1616 		lwkt_reltoken(&vm_token);
1617 
1618 		/*
1619 		 * If page is not valid for what we need, initiate I/O
1620 		 */
1621 
1622 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1623 			struct uio auio;
1624 			struct iovec aiov;
1625 			int bsize;
1626 
1627 			/*
1628 			 * Ensure that our page is still around when the I/O
1629 			 * completes.
1630 			 */
1631 			vm_page_io_start(pg);
1632 
1633 			/*
1634 			 * Get the page from backing store.
1635 			 */
1636 			bsize = vp->v_mount->mnt_stat.f_iosize;
1637 			auio.uio_iov = &aiov;
1638 			auio.uio_iovcnt = 1;
1639 			aiov.iov_base = 0;
1640 			aiov.iov_len = MAXBSIZE;
1641 			auio.uio_resid = MAXBSIZE;
1642 			auio.uio_offset = trunc_page(off);
1643 			auio.uio_segflg = UIO_NOCOPY;
1644 			auio.uio_rw = UIO_READ;
1645 			auio.uio_td = td;
1646 			vn_lock(vp, LK_SHARED | LK_RETRY);
1647 			error = VOP_READ(vp, &auio,
1648 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1649 				    td->td_ucred);
1650 			vn_unlock(vp);
1651 			vm_page_flag_clear(pg, PG_ZERO);
1652 			vm_page_io_finish(pg);
1653 			if (error) {
1654 				crit_enter();
1655 				vm_page_unwire(pg, 0);
1656 				vm_page_try_to_free(pg);
1657 				crit_exit();
1658 				ssb_unlock(&so->so_snd);
1659 				goto done;
1660 			}
1661 		}
1662 
1663 
1664 		/*
1665 		 * Get a sendfile buf. We usually wait as long as necessary,
1666 		 * but this wait can be interrupted.
1667 		 */
1668 		if ((sf = sf_buf_alloc(pg)) == NULL) {
1669 			crit_enter();
1670 			vm_page_unwire(pg, 0);
1671 			vm_page_try_to_free(pg);
1672 			crit_exit();
1673 			ssb_unlock(&so->so_snd);
1674 			error = EINTR;
1675 			goto done;
1676 		}
1677 
1678 		/*
1679 		 * Get an mbuf header and set it up as having external storage.
1680 		 */
1681 		MGETHDR(m, MB_WAIT, MT_DATA);
1682 		if (m == NULL) {
1683 			error = ENOBUFS;
1684 			sf_buf_free(sf);
1685 			ssb_unlock(&so->so_snd);
1686 			goto done;
1687 		}
1688 
1689 		m->m_ext.ext_free = sf_buf_mfree;
1690 		m->m_ext.ext_ref = sf_buf_ref;
1691 		m->m_ext.ext_arg = sf;
1692 		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1693 		m->m_ext.ext_size = PAGE_SIZE;
1694 		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1695 		m->m_flags |= M_EXT;
1696 		m->m_pkthdr.len = m->m_len = xfsize;
1697 		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1698 
1699 		if (mheader != NULL) {
1700 			hbytes = mheader->m_pkthdr.len;
1701 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1702 			m_cat(mheader, m);
1703 			m = mheader;
1704 			mheader = NULL;
1705 		} else
1706 			hbytes = 0;
1707 
1708 		/*
1709 		 * Add the buffer to the socket buffer chain.
1710 		 */
1711 		crit_enter();
1712 retry_space:
1713 		/*
1714 		 * Make sure that the socket is still able to take more data.
1715 		 * CANTSENDMORE being true usually means that the connection
1716 		 * was closed. so_error is true when an error was sensed after
1717 		 * a previous send.
1718 		 * The state is checked after the page mapping and buffer
1719 		 * allocation above since those operations may block and make
1720 		 * any socket checks stale. From this point forward, nothing
1721 		 * blocks before the pru_send (or more accurately, any blocking
1722 		 * results in a loop back to here to re-check).
1723 		 */
1724 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1725 			if (so->so_state & SS_CANTSENDMORE) {
1726 				error = EPIPE;
1727 			} else {
1728 				error = so->so_error;
1729 				so->so_error = 0;
1730 			}
1731 			m_freem(m);
1732 			ssb_unlock(&so->so_snd);
1733 			crit_exit();
1734 			goto done;
1735 		}
1736 		/*
1737 		 * Wait for socket space to become available. We do this just
1738 		 * after checking the connection state above in order to avoid
1739 		 * a race condition with ssb_wait().
1740 		 */
1741 		if (ssb_space(&so->so_snd) < so->so_snd.ssb_lowat) {
1742 			if (fp->f_flag & FNONBLOCK) {
1743 				m_freem(m);
1744 				ssb_unlock(&so->so_snd);
1745 				crit_exit();
1746 				error = EAGAIN;
1747 				goto done;
1748 			}
1749 			error = ssb_wait(&so->so_snd);
1750 			/*
1751 			 * An error from ssb_wait usually indicates that we've
1752 			 * been interrupted by a signal. If we've sent anything
1753 			 * then return bytes sent, otherwise return the error.
1754 			 */
1755 			if (error) {
1756 				m_freem(m);
1757 				ssb_unlock(&so->so_snd);
1758 				crit_exit();
1759 				goto done;
1760 			}
1761 			goto retry_space;
1762 		}
1763 		error = so_pru_send(so, 0, m, NULL, NULL, td);
1764 		crit_exit();
1765 		if (error) {
1766 			ssb_unlock(&so->so_snd);
1767 			goto done;
1768 		}
1769 	}
1770 	if (mheader != NULL) {
1771 		*sbytes += mheader->m_pkthdr.len;
1772 		error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1773 		mheader = NULL;
1774 	}
1775 	ssb_unlock(&so->so_snd);
1776 
1777 done:
1778 	fdrop(fp);
1779 done0:
1780 	if (mheader != NULL)
1781 		m_freem(mheader);
1782 	return (error);
1783 }
1784 
1785 /*
1786  * MPALMOSTSAFE
1787  */
1788 int
1789 sys_sctp_peeloff(struct sctp_peeloff_args *uap)
1790 {
1791 #ifdef SCTP
1792 	struct thread *td = curthread;
1793 	struct filedesc *fdp = td->td_proc->p_fd;
1794 	struct file *lfp = NULL;
1795 	struct file *nfp = NULL;
1796 	int error;
1797 	struct socket *head, *so;
1798 	caddr_t assoc_id;
1799 	int fd;
1800 	short fflag;		/* type must match fp->f_flag */
1801 
1802 	assoc_id = uap->name;
1803 	error = holdsock(td->td_proc->p_fd, uap->sd, &lfp);
1804 	if (error)
1805 		return (error);
1806 
1807 	crit_enter();
1808 	head = (struct socket *)lfp->f_data;
1809 	error = sctp_can_peel_off(head, assoc_id);
1810 	if (error) {
1811 		crit_exit();
1812 		goto done;
1813 	}
1814 	/*
1815 	 * At this point we know we have an assoc to peel off, so we
1816 	 * proceed to set up the fd.  This may block,
1817 	 * but that is ok.
1818 	 */
1819 
1820 	fflag = lfp->f_flag;
1821 	error = falloc(td->td_lwp, &nfp, &fd);
1822 	if (error) {
1823 		/*
1824 		 * Probably ran out of file descriptors. Put the
1825 		 * unaccepted connection back onto the queue and
1826 		 * do another wakeup so some other process might
1827 		 * have a chance at it.
1828 		 */
1829 		crit_exit();
1830 		goto done;
1831 	}
1832 	uap->sysmsg_iresult = fd;
1833 
1834 	so = sctp_get_peeloff(head, assoc_id, &error);
1835 	if (so == NULL) {
1836 		/*
1837 		 * Either someone else peeled it off OR
1838 		 * we can't get a socket.
1839 		 */
1840 		goto noconnection;
1841 	}
1842 	soreference(so);			/* reference needed */
1843 	soclrstate(so, SS_NOFDREF | SS_COMP);	/* when clearing NOFDREF */
1844 	so->so_head = NULL;
1845 	if (head->so_sigio != NULL)
1846 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
1847 
1848 	nfp->f_type = DTYPE_SOCKET;
1849 	nfp->f_flag = fflag;
1850 	nfp->f_ops = &socketops;
1851 	nfp->f_data = so;
1852 
1853 noconnection:
1854 	/*
1855 	 * Assign the file pointer to the reserved descriptor, or clear
1856 	 * the reserved descriptor if an error occurred.
1857 	 */
1858 	if (error)
1859 		fsetfd(fdp, NULL, fd);
1860 	else
1861 		fsetfd(fdp, nfp, fd);
1862 	crit_exit();
1863 	/*
1864 	 * Release explicitly held references before returning.
1865 	 */
1866 done:
1867 	if (nfp != NULL)
1868 		fdrop(nfp);
1869 	fdrop(lfp);
1870 	return (error);
1871 #else /* SCTP */
1872 	return(EOPNOTSUPP);
1873 #endif /* SCTP */
1874 }
1875