xref: /dragonfly/sys/kern/uipc_syscalls.c (revision 2cd2d2b5)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
37  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38  * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.41 2004/08/24 21:53:38 dillon Exp $
39  */
40 
41 #include "opt_ktrace.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/sysproto.h>
47 #include <sys/malloc.h>
48 #include <sys/filedesc.h>
49 #include <sys/event.h>
50 #include <sys/proc.h>
51 #include <sys/fcntl.h>
52 #include <sys/file.h>
53 #include <sys/filio.h>
54 #include <sys/kern_syscall.h>
55 #include <sys/mbuf.h>
56 #include <sys/protosw.h>
57 #include <sys/sfbuf.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/socketops.h>
61 #include <sys/uio.h>
62 #include <sys/vnode.h>
63 #include <sys/lock.h>
64 #include <sys/mount.h>
65 #ifdef KTRACE
66 #include <sys/ktrace.h>
67 #endif
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_extern.h>
74 #include <sys/file2.h>
75 #include <sys/signalvar.h>
76 
77 #include <sys/thread2.h>
78 #include <sys/msgport2.h>
79 
80 struct sfbuf_mref {
81 	struct sf_buf	*sf;
82 	int		mref_count;
83 };
84 
85 static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile sfbuf ref structures");
86 
87 /*
88  * System call interface to the socket abstraction.
89  */
90 
91 extern	struct fileops socketops;
92 
93 /*
94  * socket_args(int domain, int type, int protocol)
95  */
96 int
97 kern_socket(int domain, int type, int protocol, int *res)
98 {
99 	struct thread *td = curthread;
100 	struct proc *p = td->td_proc;
101 	struct filedesc *fdp;
102 	struct socket *so;
103 	struct file *fp;
104 	int fd, error;
105 
106 	KKASSERT(p);
107 	fdp = p->p_fd;
108 
109 	error = falloc(p, &fp, &fd);
110 	if (error)
111 		return (error);
112 	fhold(fp);
113 	error = socreate(domain, &so, type, protocol, td);
114 	if (error) {
115 		if (fdp->fd_ofiles[fd] == fp) {
116 			fdp->fd_ofiles[fd] = NULL;
117 			fdrop(fp, td);
118 		}
119 	} else {
120 		fp->f_data = (caddr_t)so;
121 		fp->f_flag = FREAD|FWRITE;
122 		fp->f_ops = &socketops;
123 		fp->f_type = DTYPE_SOCKET;
124 		*res = fd;
125 	}
126 	fdrop(fp, td);
127 	return (error);
128 }
129 
130 int
131 socket(struct socket_args *uap)
132 {
133 	int error;
134 
135 	error = kern_socket(uap->domain, uap->type, uap->protocol,
136 	    &uap->sysmsg_result);
137 
138 	return (error);
139 }
140 
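#if 0
/*
 * Illustrative sketch, not compiled: the kern_*() helpers in this file
 * take already-parsed arguments precisely so that emulation layers can
 * call them without going through the native syscall argument
 * structures.  The "emul_socket_args" structure below is a hypothetical
 * example and is not part of this file.
 */
int
emul_socket(struct emul_socket_args *uap)
{
	int fd, error;

	error = kern_socket(uap->domain, uap->type, uap->protocol, &fd);
	if (error == 0)
		uap->sysmsg_result = fd;
	return (error);
}
#endif
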
141 int
142 kern_bind(int s, struct sockaddr *sa)
143 {
144 	struct thread *td = curthread;
145 	struct proc *p = td->td_proc;
146 	struct file *fp;
147 	int error;
148 
149 	KKASSERT(p);
150 	error = holdsock(p->p_fd, s, &fp);
151 	if (error)
152 		return (error);
153 	error = sobind((struct socket *)fp->f_data, sa, td);
154 	fdrop(fp, td);
155 	return (error);
156 }
157 
158 /*
159  * bind_args(int s, caddr_t name, int namelen)
160  */
161 int
162 bind(struct bind_args *uap)
163 {
164 	struct sockaddr *sa;
165 	int error;
166 
167 	error = getsockaddr(&sa, uap->name, uap->namelen);
168 	if (error)
169 		return (error);
170 	error = kern_bind(uap->s, sa);
171 	FREE(sa, M_SONAME);
172 
173 	return (error);
174 }
175 
176 int
177 kern_listen(int s, int backlog)
178 {
179 	struct thread *td = curthread;
180 	struct proc *p = td->td_proc;
181 	struct file *fp;
182 	int error;
183 
184 	KKASSERT(p);
185 	error = holdsock(p->p_fd, s, &fp);
186 	if (error)
187 		return (error);
188 	error = solisten((struct socket *)fp->f_data, backlog, td);
189 	fdrop(fp, td);
190 	return(error);
191 }
192 
193 /*
194  * listen_args(int s, int backlog)
195  */
196 int
197 listen(struct listen_args *uap)
198 {
199 	int error;
200 
201 	error = kern_listen(uap->s, uap->backlog);
202 	return (error);
203 }
204 
205 /*
206  * Accept predicate; on success the accepted socket is returned in msg->nm_so.
207  */
208 static boolean_t
209 soaccept_predicate(struct netmsg *msg0)
210 {
211 	struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
212 	struct socket *head = msg->nm_so;
213 
214 	if (head->so_error != 0) {
215 		msg->nm_lmsg.ms_error = head->so_error;
216 		return (TRUE);
217 	}
218 	if (!TAILQ_EMPTY(&head->so_comp)) {
219 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
220 		msg->nm_so = TAILQ_FIRST(&head->so_comp);
221 		TAILQ_REMOVE(&head->so_comp, msg->nm_so, so_list);
222 		head->so_qlen--;
223 
224 		msg->nm_lmsg.ms_error = 0;
225 		return (TRUE);
226 	}
227 	if (head->so_state & SS_CANTRCVMORE) {
228 		msg->nm_lmsg.ms_error = ECONNABORTED;
229 		return (TRUE);
230 	}
231 	if (head->so_state & SS_NBIO) {
232 		msg->nm_lmsg.ms_error = EWOULDBLOCK;
233 		return (TRUE);
234 	}
235 
236 	return (FALSE);
237 }
238 
239 /*
240  * The second argument to kern_accept() is a handle to a struct sockaddr.
241  * This allows kern_accept() to return a pointer to an allocated struct
242  * sockaddr which must be freed later with FREE().  The caller must
243  * initialize *name to NULL.
244  */
245 int
246 kern_accept(int s, struct sockaddr **name, int *namelen, int *res)
247 {
248 	struct thread *td = curthread;
249 	struct proc *p = td->td_proc;
250 	struct filedesc *fdp = p->p_fd;
251 	struct file *lfp = NULL;
252 	struct file *nfp = NULL;
253 	struct sockaddr *sa;
254 	struct socket *head, *so;
255 	struct netmsg_so_notify msg;
256 	lwkt_port_t port;
257 	int fd;
258 	u_int fflag;		/* type must match fp->f_flag */
259 	int error, tmp;
260 
261 	if (name && namelen && *namelen < 0)
262 		return (EINVAL);
263 
264 	error = holdsock(fdp, s, &lfp);
265 	if (error)
266 		return (error);
267 
268 	error = falloc(p, &nfp, &fd);
269 	if (error) {		/* Probably ran out of file descriptors. */
270 		*res = -1;
271 		fdrop(lfp, td);
272 		return (error);
273 	}
274 	fhold(nfp);
275 	*res = fd;
276 
277 	head = (struct socket *)lfp->f_data;
278 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
279 		error = EINVAL;
280 		goto done;
281 	}
282 
283 	/* optimize for uniprocessor case later XXX JH */
284 	port = head->so_proto->pr_mport(head, NULL, PRU_PRED);
285 	lwkt_initmsg(&msg.nm_lmsg, &curthread->td_msgport,
286 		     MSGF_PCATCH | MSGF_ABORTABLE,
287 		     lwkt_cmd_func(netmsg_so_notify),
288 		     lwkt_cmd_func(netmsg_so_notify_abort));
289 	msg.nm_predicate = soaccept_predicate;
290 	msg.nm_so = head;
291 	msg.nm_etype = NM_REVENT;
292 	error = lwkt_domsg(port, &msg.nm_lmsg);
293 	if (error)
294 		goto done;
295 
296 	/*
297 	 * At this point we have the connection that's ready to be accepted.
298 	 */
299 	so = msg.nm_so;
300 
301 	fflag = lfp->f_flag;
302 
303 	/* connection has been removed from the listen queue */
304 	KNOTE(&head->so_rcv.sb_sel.si_note, 0);
305 
306 	so->so_state &= ~SS_COMP;
307 	so->so_head = NULL;
308 	if (head->so_sigio != NULL)
309 		fsetown(fgetown(head->so_sigio), &so->so_sigio);
310 
311 	nfp->f_data = (caddr_t)so;
312 	nfp->f_flag = fflag;
313 	nfp->f_ops = &socketops;
314 	nfp->f_type = DTYPE_SOCKET;
315 	/* Sync socket nonblocking/async state with file flags */
316 	tmp = fflag & FNONBLOCK;
317 	(void) fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td);
318 	tmp = fflag & FASYNC;
319 	(void) fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td);
320 
321 	sa = NULL;
322 	error = soaccept(so, &sa);
323 
324 	/*
325 	 * Set the returned name and namelen as applicable.  Set the returned
326 	 * namelen to 0 for older code which might ignore the return value
327 	 * from accept.
328 	 */
329 	if (error == 0) {
330 		if (sa && name && namelen) {
331 			if (*namelen > sa->sa_len)
332 				*namelen = sa->sa_len;
333 			*name = sa;
334 		} else {
335 			if (sa)
336 				FREE(sa, M_SONAME);
337 		}
338 	}
339 
340 done:
341 	/*
342 	 * close the new descriptor, assuming someone hasn't ripped it
343 	 * out from under us.  Note that *res is normally ignored if an
344 	 * error is returned but a syscall message will still have access
345 	 * to the result code.
346 	 */
347 	if (error) {
348 		*res = -1;
349 		if (fdp->fd_ofiles[fd] == nfp) {
350 			fdp->fd_ofiles[fd] = NULL;
351 			fdrop(nfp, td);
352 		}
353 	}
354 
355 	/*
356 	 * Release explicitly held references before returning.
357 	 */
358 	if (nfp != NULL)
359 		fdrop(nfp, td);
360 	fdrop(lfp, td);
361 	return (error);
362 }
363 
364 /*
365  * accept_args(int s, caddr_t name, int *anamelen)
366  */
367 int
368 accept(struct accept_args *uap)
369 {
370 	struct sockaddr *sa = NULL;
371 	int sa_len;
372 	int error;
373 
374 	if (uap->name) {
375 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
376 		if (error)
377 			return (error);
378 
379 		error = kern_accept(uap->s, &sa, &sa_len, &uap->sysmsg_result);
380 
381 		if (error == 0)
382 			error = copyout(sa, uap->name, sa_len);
383 		if (error == 0) {
384 			error = copyout(&sa_len, uap->anamelen,
385 			    sizeof(*uap->anamelen));
386 		}
387 		if (sa)
388 			FREE(sa, M_SONAME);
389 	} else {
390 		error = kern_accept(uap->s, NULL, 0, &uap->sysmsg_result);
391 	}
392 	return (error);
393 }
394 
395 /*
396  * Returns TRUE if predicate satisfied.
397  */
398 static boolean_t
399 soconnected_predicate(struct netmsg *msg0)
400 {
401 	struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
402 	struct socket *so = msg->nm_so;
403 
404 	/* check predicate */
405 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
406 		msg->nm_lmsg.ms_error = so->so_error;
407 		return (TRUE);
408 	}
409 
410 	return (FALSE);
411 }
412 
413 int
414 kern_connect(int s, struct sockaddr *sa)
415 {
416 	struct thread *td = curthread;
417 	struct proc *p = td->td_proc;
418 	struct file *fp;
419 	struct socket *so;
420 	int error;
421 
422 	error = holdsock(p->p_fd, s, &fp);
423 	if (error)
424 		return (error);
425 	so = (struct socket *)fp->f_data;
426 	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
427 		error = EALREADY;
428 		goto done;
429 	}
430 	error = soconnect(so, sa, td);
431 	if (error)
432 		goto bad;
433 	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
434 		error = EINPROGRESS;
435 		goto done;
436 	}
437 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
438 		struct netmsg_so_notify msg;
439 		lwkt_port_t port;
440 
441 		port = so->so_proto->pr_mport(so, sa, PRU_PRED);
442 		lwkt_initmsg(&msg.nm_lmsg,
443 			    &curthread->td_msgport,
444 			    MSGF_PCATCH | MSGF_ABORTABLE,
445 			    lwkt_cmd_func(netmsg_so_notify),
446 			    lwkt_cmd_func(netmsg_so_notify_abort));
447 		msg.nm_predicate = soconnected_predicate;
448 		msg.nm_so = so;
449 		msg.nm_etype = NM_REVENT;
450 		error = lwkt_domsg(port, &msg.nm_lmsg);
451 	}
452 	if (error == 0) {
453 		error = so->so_error;
454 		so->so_error = 0;
455 	}
456 bad:
457 	so->so_state &= ~SS_ISCONNECTING;
458 	if (error == ERESTART)
459 		error = EINTR;
460 done:
461 	fdrop(fp, td);
462 	return (error);
463 }
464 
465 /*
466  * connect_args(int s, caddr_t name, int namelen)
467  */
468 int
469 connect(struct connect_args *uap)
470 {
471 	struct sockaddr *sa;
472 	int error;
473 
474 	error = getsockaddr(&sa, uap->name, uap->namelen);
475 	if (error)
476 		return (error);
477 	error = kern_connect(uap->s, sa);
478 	FREE(sa, M_SONAME);
479 
480 	return (error);
481 }
482 
483 int
484 kern_socketpair(int domain, int type, int protocol, int *sv)
485 {
486 	struct thread *td = curthread;
487 	struct proc *p = td->td_proc;
488 	struct filedesc *fdp;
489 	struct file *fp1, *fp2;
490 	struct socket *so1, *so2;
491 	int fd, error;
492 
493 	KKASSERT(p);
494 	fdp = p->p_fd;
495 	error = socreate(domain, &so1, type, protocol, td);
496 	if (error)
497 		return (error);
498 	error = socreate(domain, &so2, type, protocol, td);
499 	if (error)
500 		goto free1;
501 	error = falloc(p, &fp1, &fd);
502 	if (error)
503 		goto free2;
504 	fhold(fp1);
505 	sv[0] = fd;
506 	fp1->f_data = (caddr_t)so1;
507 	error = falloc(p, &fp2, &fd);
508 	if (error)
509 		goto free3;
510 	fhold(fp2);
511 	fp2->f_data = (caddr_t)so2;
512 	sv[1] = fd;
513 	error = soconnect2(so1, so2);
514 	if (error)
515 		goto free4;
516 	if (type == SOCK_DGRAM) {
517 		/*
518 		 * Datagram socket connection is asymmetric.
519 		 */
520 		 error = soconnect2(so2, so1);
521 		 if (error)
522 			goto free4;
523 	}
524 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
525 	fp1->f_ops = fp2->f_ops = &socketops;
526 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
527 	fdrop(fp1, td);
528 	fdrop(fp2, td);
529 	return (error);
530 free4:
531 	if (fdp->fd_ofiles[sv[1]] == fp2) {
532 		fdp->fd_ofiles[sv[1]] = NULL;
533 		fdrop(fp2, td);
534 	}
535 	fdrop(fp2, td);
536 free3:
537 	if (fdp->fd_ofiles[sv[0]] == fp1) {
538 		fdp->fd_ofiles[sv[0]] = NULL;
539 		fdrop(fp1, td);
540 	}
541 	fdrop(fp1, td);
542 free2:
543 	(void)soclose(so2);
544 free1:
545 	(void)soclose(so1);
546 	return (error);
547 }
548 
549 /*
550  * socketpair_args(int domain, int type, int protocol, int *rsv)
551  */
552 int
553 socketpair(struct socketpair_args *uap)
554 {
555 	int error, sockv[2];
556 
557 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
558 
559 	if (error == 0)
560 		error = copyout(sockv, uap->rsv, sizeof(sockv));
561 	return (error);
562 }
563 
564 int
565 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
566     struct mbuf *control, int flags, int *res)
567 {
568 	struct thread *td = curthread;
569 	struct proc *p = td->td_proc;
570 	struct file *fp;
571 	int len, error;
572 	struct socket *so;
573 #ifdef KTRACE
574 	struct iovec *ktriov = NULL;
575 	struct uio ktruio;
576 #endif
577 
578 	error = holdsock(p->p_fd, s, &fp);
579 	if (error)
580 		return (error);
581 	if (auio->uio_resid < 0) {
582 		error = EINVAL;
583 		goto done;
584 	}
585 #ifdef KTRACE
586 	if (KTRPOINT(td, KTR_GENIO)) {
587 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
588 
589 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
590 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
591 		ktruio = *auio;
592 	}
593 #endif
594 	len = auio->uio_resid;
595 	so = (struct socket *)fp->f_data;
596 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
597 	if (error) {
598 		if (auio->uio_resid != len && (error == ERESTART ||
599 		    error == EINTR || error == EWOULDBLOCK))
600 			error = 0;
601 		if (error == EPIPE)
602 			psignal(p, SIGPIPE);
603 	}
604 #ifdef KTRACE
605 	if (ktriov != NULL) {
606 		if (error == 0) {
607 			ktruio.uio_iov = ktriov;
608 			ktruio.uio_resid = len - auio->uio_resid;
609 			ktrgenio(p->p_tracep, s, UIO_WRITE, &ktruio, error);
610 		}
611 		FREE(ktriov, M_TEMP);
612 	}
613 #endif
614 	if (error == 0)
615 		*res = len - auio->uio_resid;
616 done:
617 	fdrop(fp, td);
618 	return (error);
619 }
620 
621 /*
622  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
623  */
624 int
625 sendto(struct sendto_args *uap)
626 {
627 	struct thread *td = curthread;
628 	struct uio auio;
629 	struct iovec aiov;
630 	struct sockaddr *sa = NULL;
631 	int error;
632 
633 	if (uap->to) {
634 		error = getsockaddr(&sa, uap->to, uap->tolen);
635 		if (error)
636 			return (error);
637 	}
638 	aiov.iov_base = uap->buf;
639 	aiov.iov_len = uap->len;
640 	auio.uio_iov = &aiov;
641 	auio.uio_iovcnt = 1;
642 	auio.uio_offset = 0;
643 	auio.uio_resid = uap->len;
644 	auio.uio_segflg = UIO_USERSPACE;
645 	auio.uio_rw = UIO_WRITE;
646 	auio.uio_td = td;
647 
648 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
649 	    &uap->sysmsg_result);
650 
651 	if (sa)
652 		FREE(sa, M_SONAME);
653 	return (error);
654 }
655 
656 /*
657  * sendmsg_args(int s, caddr_t msg, int flags)
658  */
659 int
660 sendmsg(struct sendmsg_args *uap)
661 {
662 	struct thread *td = curthread;
663 	struct msghdr msg;
664 	struct uio auio;
665 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
666 	struct sockaddr *sa = NULL;
667 	struct mbuf *control = NULL;
668 	int error;
669 
670 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
671 	if (error)
672 		return (error);
673 
674 	/*
675 	 * Conditionally copyin msg.msg_name.
676 	 */
677 	if (msg.msg_name) {
678 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
679 		if (error)
680 			return (error);
681 	}
682 
683 	/*
684 	 * Populate auio.
685 	 */
686 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
687 	    &auio.uio_resid);
688 	if (error)
689 		goto cleanup;
690 	auio.uio_iov = iov;
691 	auio.uio_iovcnt = msg.msg_iovlen;
692 	auio.uio_offset = 0;
693 	auio.uio_segflg = UIO_USERSPACE;
694 	auio.uio_rw = UIO_WRITE;
695 	auio.uio_td = td;
696 
697 	/*
698 	 * Conditionally copyin msg.msg_control.
699 	 */
700 	if (msg.msg_control) {
701 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
702 		    msg.msg_controllen > MLEN) {
703 			error = EINVAL;
704 			goto cleanup;
705 		}
706 		control = m_get(MB_WAIT, MT_CONTROL);
707 		if (control == NULL) {
708 			error = ENOBUFS;
709 			goto cleanup;
710 		}
711 		control->m_len = msg.msg_controllen;
712 		error = copyin(msg.msg_control, mtod(control, caddr_t),
713 		    msg.msg_controllen);
714 		if (error) {
715 			m_free(control);
716 			goto cleanup;
717 		}
718 	}
719 
720 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
721 	    &uap->sysmsg_result);
722 
723 cleanup:
724 	if (sa)
725 		FREE(sa, M_SONAME);
726 	iovec_free(&iov, aiov);
727 	return (error);
728 }
729 
730 /*
731  * kern_recvmsg() takes handles to sa and control.  If a handle is non-null,
732  * it returns a dynamically allocated struct sockaddr or an mbuf chain there.
733  * Don't forget to FREE() and m_freem() these if they are returned.
734  */
735 int
736 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
737     struct mbuf **control, int *flags, int *res)
738 {
739 	struct thread *td = curthread;
740 	struct proc *p = td->td_proc;
741 	struct file *fp;
742 	int len, error;
743 	struct socket *so;
744 #ifdef KTRACE
745 	struct iovec *ktriov = NULL;
746 	struct uio ktruio;
747 #endif
748 
749 	error = holdsock(p->p_fd, s, &fp);
750 	if (error)
751 		return (error);
752 	if (auio->uio_resid < 0) {
753 		error = EINVAL;
754 		goto done;
755 	}
756 #ifdef KTRACE
757 	if (KTRPOINT(td, KTR_GENIO)) {
758 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
759 
760 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
761 		bcopy(auio->uio_iov, ktriov, iovlen);
762 		ktruio = *auio;
763 	}
764 #endif
765 	len = auio->uio_resid;
766 	so = (struct socket *)fp->f_data;
767 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
768 	if (error) {
769 		if (auio->uio_resid != len && (error == ERESTART ||
770 		    error == EINTR || error == EWOULDBLOCK))
771 			error = 0;
772 	}
773 #ifdef KTRACE
774 	if (ktriov != NULL) {
775 		if (error == 0) {
776 			ktruio.uio_iov = ktriov;
777 			ktruio.uio_resid = len - auio->uio_resid;
778 			ktrgenio(p->p_tracep, s, UIO_READ, &ktruio, error);
779 		}
780 		FREE(ktriov, M_TEMP);
781 	}
782 #endif
783 	if (error == 0)
784 		*res = len - auio->uio_resid;
785 done:
786 	fdrop(fp, td);
787 	return (error);
788 }
789 
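#if 0
/*
 * Illustrative sketch, not compiled: a hypothetical in-kernel caller of
 * kern_recvmsg() showing the cleanup contract described above.  The
 * caller owns the returned sockaddr (released with FREE()) and the
 * control mbuf chain (released with m_freem()).  Setting up "auio" is
 * left out for brevity.
 */
static int
example_recv(int s, struct uio *auio)
{
	struct sockaddr *sa = NULL;
	struct mbuf *control = NULL;
	int flags = 0;
	int res, error;

	error = kern_recvmsg(s, &sa, auio, &control, &flags, &res);
	if (sa)
		FREE(sa, M_SONAME);
	if (control)
		m_freem(control);
	return (error);
}
#endif
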
790 /*
791  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
792  *			caddr_t from, int *fromlenaddr)
793  */
794 int
795 recvfrom(struct recvfrom_args *uap)
796 {
797 	struct thread *td = curthread;
798 	struct uio auio;
799 	struct iovec aiov;
800 	struct sockaddr *sa = NULL;
801 	int error, fromlen;
802 
803 	if (uap->from && uap->fromlenaddr) {
804 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
805 		if (error)
806 			return (error);
807 		if (fromlen < 0)
808 			return (EINVAL);
809 	} else {
810 		fromlen = 0;
811 	}
812 	aiov.iov_base = uap->buf;
813 	aiov.iov_len = uap->len;
814 	auio.uio_iov = &aiov;
815 	auio.uio_iovcnt = 1;
816 	auio.uio_offset = 0;
817 	auio.uio_resid = uap->len;
818 	auio.uio_segflg = UIO_USERSPACE;
819 	auio.uio_rw = UIO_READ;
820 	auio.uio_td = td;
821 
822 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
823 	    &uap->flags, &uap->sysmsg_result);
824 
825 	if (error == 0 && uap->from) {
826 		/* note: sa may still be NULL */
827 		if (sa) {
828 			fromlen = MIN(fromlen, sa->sa_len);
829 			error = copyout(sa, uap->from, fromlen);
830 		} else {
831 			fromlen = 0;
832 		}
833 		if (error == 0) {
834 			error = copyout(&fromlen, uap->fromlenaddr,
835 					sizeof(fromlen));
836 		}
837 	}
838 	if (sa)
839 		FREE(sa, M_SONAME);
840 
841 	return (error);
842 }
843 
844 /*
845  * recvmsg_args(int s, struct msghdr *msg, int flags)
846  */
847 int
848 recvmsg(struct recvmsg_args *uap)
849 {
850 	struct thread *td = curthread;
851 	struct msghdr msg;
852 	struct uio auio;
853 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
854 	struct mbuf *m, *control = NULL;
855 	struct sockaddr *sa = NULL;
856 	caddr_t ctlbuf;
857 	socklen_t *ufromlenp, *ucontrollenp;
858 	int error, fromlen, controllen, len, flags, *uflagsp;
859 
860 	/*
861 	 * This copyin handles everything except the iovec.
862 	 */
863 	error = copyin(uap->msg, &msg, sizeof(msg));
864 	if (error)
865 		return (error);
866 
867 	if (msg.msg_name && msg.msg_namelen < 0)
868 		return (EINVAL);
869 	if (msg.msg_control && msg.msg_controllen < 0)
870 		return (EINVAL);
871 
872 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
873 	    msg_namelen));
874 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
875 	    msg_controllen));
876 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
877 	    msg_flags));
878 
879 	/*
880 	 * Populate auio.
881 	 */
882 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
883 	    &auio.uio_resid);
884 	if (error)
885 		return (error);
886 	auio.uio_iov = iov;
887 	auio.uio_iovcnt = msg.msg_iovlen;
888 	auio.uio_offset = 0;
889 	auio.uio_segflg = UIO_USERSPACE;
890 	auio.uio_rw = UIO_READ;
891 	auio.uio_td = td;
892 
893 	flags = msg.msg_flags;
894 
895 	error = kern_recvmsg(uap->s, msg.msg_name ? &sa : NULL, &auio,
896 	    msg.msg_control ? &control : NULL, &flags, &uap->sysmsg_result);
897 
898 	/*
899 	 * Conditionally copyout the name and populate the namelen field.
900 	 */
901 	if (error == 0 && msg.msg_name) {
902 		fromlen = MIN(msg.msg_namelen, sa->sa_len);
903 		error = copyout(sa, msg.msg_name, fromlen);
904 		if (error == 0)
905 			error = copyout(&fromlen, ufromlenp,
906 			    sizeof(*ufromlenp));
907 	}
908 
909 	/*
910 	 * Copyout msg.msg_control and msg.msg_controllen.
911 	 */
912 	if (error == 0 && msg.msg_control) {
913 		len = msg.msg_controllen;
914 		m = control;
915 		ctlbuf = (caddr_t)msg.msg_control;
916 
917 		while (m && len > 0) {
918 			unsigned int tocopy;
919 
920 			if (len >= m->m_len) {
921 				tocopy = m->m_len;
922 			} else {
923 				msg.msg_flags |= MSG_CTRUNC;
924 				tocopy = len;
925 			}
926 
927 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
928 			if (error)
929 				goto cleanup;
930 
931 			ctlbuf += tocopy;
932 			len -= tocopy;
933 			m = m->m_next;
934 		}
935 		controllen = ctlbuf - (caddr_t)msg.msg_control;
936 		error = copyout(&controllen, ucontrollenp,
937 		    sizeof(*ucontrollenp));
938 	}
939 
940 	if (error == 0)
941 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
942 
943 cleanup:
944 	if (sa)
945 		FREE(sa, M_SONAME);
946 	iovec_free(&iov, aiov);
947 	if (control)
948 		m_freem(control);
949 	return (error);
950 }
951 
952 /*
953  * shutdown_args(int s, int how)
954  */
955 int
956 kern_shutdown(int s, int how)
957 {
958 	struct thread *td = curthread;
959 	struct proc *p = td->td_proc;
960 	struct file *fp;
961 	int error;
962 
963 	KKASSERT(p);
964 	error = holdsock(p->p_fd, s, &fp);
965 	if (error)
966 		return (error);
967 	error = soshutdown((struct socket *)fp->f_data, how);
968 	fdrop(fp, td);
969 	return(error);
970 }
971 
972 int
973 shutdown(struct shutdown_args *uap)
974 {
975 	int error;
976 
977 	error = kern_shutdown(uap->s, uap->how);
978 
979 	return (error);
980 }
981 
982 /*
983  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
984  * in-kernel pointer instead of a userland pointer.  This allows us
985  * to manipulate socket options in the emulation code.
986  */
987 int
988 kern_setsockopt(int s, struct sockopt *sopt)
989 {
990 	struct thread *td = curthread;
991 	struct proc *p = td->td_proc;
992 	struct file *fp;
993 	int error;
994 
995 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
996 		return (EFAULT);
997 	if (sopt->sopt_valsize < 0)
998 		return (EINVAL);
999 
1000 	error = holdsock(p->p_fd, s, &fp);
1001 	if (error)
1002 		return (error);
1003 
1004 	error = sosetopt((struct socket *)fp->f_data, sopt);
1005 	fdrop(fp, td);
1006 	return (error);
1007 }
1008 
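#if 0
/*
 * Illustrative sketch, not compiled: setting a socket option from
 * kernel or emulation code via kern_setsockopt().  Leaving sopt_td
 * NULL marks sopt_val as a kernel pointer, so sosetopt() performs no
 * copyin.  The option chosen (SO_REUSEADDR) is only an example.
 */
static int
example_set_reuseaddr(int s)
{
	struct sockopt sopt;
	int one = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &one;			/* kernel pointer */
	sopt.sopt_valsize = sizeof(one);
	sopt.sopt_td = NULL;			/* value lives in the kernel */

	return (kern_setsockopt(s, &sopt));
}
#endif
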
1009 /*
1010  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1011  */
1012 int
1013 setsockopt(struct setsockopt_args *uap)
1014 {
1015 	struct thread *td = curthread;
1016 	struct sockopt sopt;
1017 	int error;
1018 
1019 	sopt.sopt_dir = SOPT_SET;
1020 	sopt.sopt_level = uap->level;
1021 	sopt.sopt_name = uap->name;
1022 	sopt.sopt_val = uap->val;
1023 	sopt.sopt_valsize = uap->valsize;
1024 	sopt.sopt_td = td;
1025 
1026 	error = kern_setsockopt(uap->s, &sopt);
1027 	return(error);
1028 }
1029 
1030 /*
1031  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1032  * in-kernel pointer instead of a userland pointer.  This allows us
1033  * to manipulate socket options in the emulation code.
1034  */
1035 int
1036 kern_getsockopt(int s, struct sockopt *sopt)
1037 {
1038 	struct thread *td = curthread;
1039 	struct proc *p = td->td_proc;
1040 	struct file *fp;
1041 	int error;
1042 
1043 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1044 		return (EFAULT);
1045 	if (sopt->sopt_valsize < 0)
1046 		return (EINVAL);
1047 
1048 	error = holdsock(p->p_fd, s, &fp);
1049 	if (error)
1050 		return (error);
1051 
1052 	error = sogetopt((struct socket *)fp->f_data, sopt);
1053 	fdrop(fp, td);
1054 	return (error);
1055 }
1056 
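#if 0
/*
 * Illustrative sketch, not compiled: the symmetric read side.  As with
 * kern_setsockopt(), a NULL sopt_td makes sopt_val an in-kernel buffer;
 * on return sopt_valsize holds the amount of option data written.
 * SO_TYPE is only an example option.
 */
static int
example_get_type(int s, int *typep)
{
	struct sockopt sopt;

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_TYPE;
	sopt.sopt_val = typep;			/* kernel pointer */
	sopt.sopt_valsize = sizeof(*typep);
	sopt.sopt_td = NULL;			/* value lives in the kernel */

	return (kern_getsockopt(s, &sopt));
}
#endif
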
1057 /*
1058  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1059  */
1060 int
1061 getsockopt(struct getsockopt_args *uap)
1062 {
1063 	struct thread *td = curthread;
1064 	struct	sockopt sopt;
1065 	int	error, valsize;
1066 
1067 	if (uap->val) {
1068 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1069 		if (error)
1070 			return (error);
1071 		if (valsize < 0)
1072 			return (EINVAL);
1073 	} else {
1074 		valsize = 0;
1075 	}
1076 
1077 	sopt.sopt_dir = SOPT_GET;
1078 	sopt.sopt_level = uap->level;
1079 	sopt.sopt_name = uap->name;
1080 	sopt.sopt_val = uap->val;
1081 	sopt.sopt_valsize = valsize;
1082 	sopt.sopt_td = td;
1083 
1084 	error = kern_getsockopt(uap->s, &sopt);
1085 	if (error == 0) {
1086 		valsize = sopt.sopt_valsize;
1087 		error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1088 	}
1089 	return (error);
1090 }
1091 
1092 /*
1093  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1094  * This allows kern_getsockname() to return a pointer to an allocated struct
1095  * sockaddr which must be freed later with FREE().  The caller must
1096  * initialize *name to NULL.
1097  */
1098 int
1099 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1100 {
1101 	struct thread *td = curthread;
1102 	struct proc *p = td->td_proc;
1103 	struct file *fp;
1104 	struct socket *so;
1105 	struct sockaddr *sa = NULL;
1106 	int error;
1107 
1108 	error = holdsock(p->p_fd, s, &fp);
1109 	if (error)
1110 		return (error);
1111 	if (*namelen < 0) {
1112 		fdrop(fp, td);
1113 		return (EINVAL);
1114 	}
1115 	so = (struct socket *)fp->f_data;
1116 	error = so_pru_sockaddr(so, &sa);
1117 	if (error == 0) {
1118 		if (sa == 0) {
1119 			*namelen = 0;
1120 		} else {
1121 			*namelen = MIN(*namelen, sa->sa_len);
1122 			*name = sa;
1123 		}
1124 	}
1125 
1126 	fdrop(fp, td);
1127 	return (error);
1128 }
1129 
1130 /*
1131  * getsockname_args(int fdes, caddr_t asa, int *alen)
1132  *
1133  * Get socket name.
1134  */
1135 int
1136 getsockname(struct getsockname_args *uap)
1137 {
1138 	struct sockaddr *sa = NULL;
1139 	int error, sa_len;
1140 
1141 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1142 	if (error)
1143 		return (error);
1144 
1145 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1146 
1147 	if (error == 0)
1148 		error = copyout(sa, uap->asa, sa_len);
1149 	if (error == 0)
1150 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1151 	if (sa)
1152 		FREE(sa, M_SONAME);
1153 	return (error);
1154 }
1155 
1156 /*
1157  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1158  * This allows kern_getpeername() to return a pointer to an allocated struct
1159  * sockaddr which must be freed later with FREE().  The caller must
1160  * initialize *name to NULL.
1161  */
1162 int
1163 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1164 {
1165 	struct thread *td = curthread;
1166 	struct proc *p = td->td_proc;
1167 	struct file *fp;
1168 	struct socket *so;
1169 	struct sockaddr *sa = NULL;
1170 	int error;
1171 
1172 	error = holdsock(p->p_fd, s, &fp);
1173 	if (error)
1174 		return (error);
1175 	if (*namelen < 0) {
1176 		fdrop(fp, td);
1177 		return (EINVAL);
1178 	}
1179 	so = (struct socket *)fp->f_data;
1180 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1181 		fdrop(fp, td);
1182 		return (ENOTCONN);
1183 	}
1184 	error = so_pru_peeraddr(so, &sa);
1185 	if (error == 0) {
1186 		if (sa == 0) {
1187 			*namelen = 0;
1188 		} else {
1189 			*namelen = MIN(*namelen, sa->sa_len);
1190 			*name = sa;
1191 		}
1192 	}
1193 
1194 	fdrop(fp, td);
1195 	return (error);
1196 }
1197 
1198 /*
1199  * getpeername_args(int fdes, caddr_t asa, int *alen)
1200  *
1201  * Get name of peer for connected socket.
1202  */
1203 int
1204 getpeername(struct getpeername_args *uap)
1205 {
1206 	struct sockaddr *sa = NULL;
1207 	int error, sa_len;
1208 
1209 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1210 	if (error)
1211 		return (error);
1212 
1213 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1214 
1215 	if (error == 0)
1216 		error = copyout(sa, uap->asa, sa_len);
1217 	if (error == 0)
1218 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1219 	if (sa)
1220 		FREE(sa, M_SONAME);
1221 	return (error);
1222 }
1223 
1224 int
1225 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1226 {
1227 	struct sockaddr *sa;
1228 	int error;
1229 
1230 	*namp = NULL;
1231 	if (len > SOCK_MAXADDRLEN)
1232 		return ENAMETOOLONG;
1233 	if (len < offsetof(struct sockaddr, sa_data[0]))
1234 		return EDOM;
1235 	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1236 	error = copyin(uaddr, sa, len);
1237 	if (error) {
1238 		FREE(sa, M_SONAME);
1239 	} else {
1240 #if BYTE_ORDER != BIG_ENDIAN
1241 		/*
1242 		 * The bind(), connect(), and sendto() syscalls were not
1243 		 * versioned for COMPAT_43.  Thus, this check must stay.
1244 		 */
1245 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1246 			sa->sa_family = sa->sa_len;
1247 #endif
1248 		sa->sa_len = len;
1249 		*namp = sa;
1250 	}
1251 	return error;
1252 }
1253 
1254 /*
1255  * holdsock() - load the struct file pointer associated
1256  * with a socket into *fpp.  If an error occurs, non-zero
1257  * will be returned and *fpp will be set to NULL.
1258  */
1259 int
1260 holdsock(struct filedesc *fdp, int fdes, struct file **fpp)
1264 {
1265 	struct file *fp = NULL;
1266 	int error = 0;
1267 
1268 	if ((unsigned)fdes >= fdp->fd_nfiles ||
1269 	    (fp = fdp->fd_ofiles[fdes]) == NULL) {
1270 		error = EBADF;
1271 	} else if (fp->f_type != DTYPE_SOCKET) {
1272 		error = ENOTSOCK;
1273 		fp = NULL;
1274 	} else {
1275 		fhold(fp);
1276 	}
1277 	*fpp = fp;
1278 	return(error);
1279 }
1280 
1281 /*
1282  * Detach a mapped page and release resources back to the system.
1283  * We must release our wiring and if the object is ripped out
1284  * from under the vm_page we become responsible for freeing the
1285  * page.
1286  *
1287  * XXX HACK XXX TEMPORARY UNTIL WE IMPLEMENT EXT MBUF REFERENCE COUNTING
1288  */
1289 static void
1290 sf_buf_mref(void *arg)
1291 {
1292 	struct sfbuf_mref *sfm = arg;
1293 
1294 	++sfm->mref_count;
1295 }
1296 
1297 static void
1298 sf_buf_mfree(void *arg)
1299 {
1300 	struct sfbuf_mref *sfm = arg;
1301 	vm_page_t m;
1302 	int s;
1303 
1304 	KKASSERT(sfm->mref_count > 0);
1305 	if (--sfm->mref_count == 0) {
1306 		m = sf_buf_page(sfm->sf);
1307 		sf_buf_free(sfm->sf);
1308 		s = splvm();
1309 		vm_page_unwire(m, 0);
1310 		if (m->wire_count == 0 && m->object == NULL)
1311 			vm_page_free(m);
1312 		splx(s);
1313 		free(sfm, M_SENDFILE);
1314 	}
1315 }
1316 
1317 /*
1318  * sendfile(2).
1319  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1320  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1321  *
1322  * Send a file specified by 'fd' and starting at 'offset' to a socket
1323  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1324  * nbytes == 0. Optionally add a header and/or trailer to the socket
1325  * output. If specified, write the total number of bytes sent into *sbytes.
1326  *
1327  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1328  * the headers to count against the remaining bytes to be sent from
1329  * the file descriptor.  We may wish to implement a compatibility syscall
1330  * in the future.
1331  */
1332 int
1333 sendfile(struct sendfile_args *uap)
1334 {
1335 	struct thread *td = curthread;
1336 	struct proc *p = td->td_proc;
1337 	struct file *fp;
1338 	struct filedesc *fdp;
1339 	struct vnode *vp = NULL;
1340 	struct sf_hdtr hdtr;
1341 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1342 	struct uio auio;
1343 	struct mbuf *mheader = NULL;
1344 	off_t hdtr_size = 0, sbytes = 0;
1345 	int error, hbytes = 0, tbytes;
1346 
1347 	KKASSERT(p);
1348 	fdp = p->p_fd;
1349 
1350 	/*
1351 	 * Do argument checking.  The input must be a regular file, the
1352 	 * output a connected stream socket, and the offset non-negative.
1353 	 */
1354 	fp = holdfp(fdp, uap->fd, FREAD);
1355 	if (fp == NULL) {
1356 		return (EBADF);
1357 	}
1358 	if (fp->f_type != DTYPE_VNODE) {
1359 		fdrop(fp, td);
1360 		return (EINVAL);
1361 	}
1362 	vp = (struct vnode *)fp->f_data;
1363 	vref(vp);
1364 	fdrop(fp, td);
1365 
1366 	/*
1367 	 * If specified, get the pointer to the sf_hdtr struct for
1368 	 * any headers/trailers.
1369 	 */
1370 	if (uap->hdtr) {
1371 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1372 		if (error)
1373 			goto done;
1374 		/*
1375 		 * Send any headers.
1376 		 */
1377 		if (hdtr.headers) {
1378 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1379 			    hdtr.hdr_cnt, &hbytes);
1380 			if (error)
1381 				goto done;
1382 			auio.uio_iov = iov;
1383 			auio.uio_iovcnt = hdtr.hdr_cnt;
1384 			auio.uio_offset = 0;
1385 			auio.uio_segflg = UIO_USERSPACE;
1386 			auio.uio_rw = UIO_WRITE;
1387 			auio.uio_td = td;
1388 			auio.uio_resid = hbytes;
1389 
1390 			mheader = m_uiomove(&auio, MB_WAIT, 0);
1391 
1392 			iovec_free(&iov, aiov);
1393 			if (mheader == NULL) {
1394 				error = ENOBUFS;
				goto done;
			}
1395 		}
1396 	}
1397 
1398 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1399 	    &sbytes, uap->flags);
1400 	if (error)
1401 		goto done;
1402 
1403 	/*
1404 	 * Send trailers. Wimp out and use writev(2).
1405 	 */
1406 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1407 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1408 		    hdtr.trl_cnt, &auio.uio_resid);
1409 		if (error)
1410 			goto done;
1411 		auio.uio_iov = iov;
1412 		auio.uio_iovcnt = hdtr.trl_cnt;
1413 		auio.uio_offset = 0;
1414 		auio.uio_segflg = UIO_USERSPACE;
1415 		auio.uio_rw = UIO_WRITE;
1416 		auio.uio_td = td;
1417 
1418 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1419 
1420 		iovec_free(&iov, aiov);
1421 		if (error)
1422 			goto done;
1423 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1424 	}
1425 
1426 done:
1427 	if (uap->sbytes != NULL) {
1428 		sbytes += hdtr_size;
1429 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1430 	}
1431 	if (vp)
1432 		vrele(vp);
1433 	return (error);
1434 }
1435 
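#if 0
/*
 * Illustrative userland sketch, not compiled as part of the kernel:
 * one plausible way to call sendfile(2) as documented above, sending a
 * small header from memory followed by the whole file (nbytes == 0
 * means send until EOF).  The descriptors are assumed to be a regular
 * file and a connected SOCK_STREAM socket; error handling is minimal.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_file_with_header(int filefd, int sockfd, void *hdr, size_t hdrlen)
{
	struct iovec hdr_iov;
	struct sf_hdtr hdtr;
	off_t sbytes = 0;

	hdr_iov.iov_base = hdr;
	hdr_iov.iov_len = hdrlen;
	hdtr.headers = &hdr_iov;
	hdtr.hdr_cnt = 1;
	hdtr.trailers = NULL;
	hdtr.trl_cnt = 0;

	if (sendfile(filefd, sockfd, 0, 0, &hdtr, &sbytes, 0) < 0)
		return (-1);
	/* sbytes now holds the total bytes sent, headers included */
	return (0);
}
#endif
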
1436 int
1437 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1438     struct mbuf *mheader, off_t *sbytes, int flags)
1439 {
1440 	struct thread *td = curthread;
1441 	struct proc *p = td->td_proc;
1442 	struct vm_object *obj;
1443 	struct socket *so;
1444 	struct file *fp;
1445 	struct mbuf *m;
1446 	struct sf_buf *sf;
1447 	struct sfbuf_mref *sfm;
1448 	struct vm_page *pg;
1449 	off_t off, xfsize;
1450 	off_t hbytes = 0;
1451 	int error = 0;
1452 	int s;
1453 
1454 	if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
1455 		error = EINVAL;
1456 		goto done;
1457 	}
1458 	error = holdsock(p->p_fd, sfd, &fp);
1459 	if (error)
1460 		goto done;
1461 	so = (struct socket *)fp->f_data;
1462 	if (so->so_type != SOCK_STREAM) {
1463 		error = EINVAL;
1464 		goto done;
1465 	}
1466 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1467 		error = ENOTCONN;
1468 		goto done;
1469 	}
1470 	if (offset < 0) {
1471 		error = EINVAL;
1472 		goto done;
1473 	}
1474 
1475 	*sbytes = 0;
1476 	/*
1477 	 * Protect against multiple writers to the socket.
1478 	 */
1479 	(void) sblock(&so->so_snd, M_WAITOK);
1480 
1481 	/*
1482 	 * Loop through the pages in the file, starting with the requested
1483 	 * offset. Get a file page (do I/O if necessary), map the file page
1484 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1485 	 * it on the socket.
1486 	 */
1487 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
1488 		vm_pindex_t pindex;
1489 		vm_offset_t pgoff;
1490 
1491 		pindex = OFF_TO_IDX(off);
1492 retry_lookup:
1493 		/*
1494 		 * Calculate the amount to transfer. Not to exceed a page,
1495 		 * the EOF, or the passed in nbytes.
1496 		 */
1497 		xfsize = obj->un_pager.vnp.vnp_size - off;
1498 		if (xfsize > PAGE_SIZE)
1499 			xfsize = PAGE_SIZE;
1500 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1501 		if (PAGE_SIZE - pgoff < xfsize)
1502 			xfsize = PAGE_SIZE - pgoff;
1503 		if (nbytes && xfsize > (nbytes - *sbytes))
1504 			xfsize = nbytes - *sbytes;
1505 		if (xfsize <= 0)
1506 			break;
1507 		/*
1508 		 * Optimize the non-blocking case by looking at the socket space
1509 		 * before going to the extra work of constituting the sf_buf.
1510 		 */
1511 		if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) {
1512 			if (so->so_state & SS_CANTSENDMORE)
1513 				error = EPIPE;
1514 			else
1515 				error = EAGAIN;
1516 			sbunlock(&so->so_snd);
1517 			goto done;
1518 		}
1519 		/*
1520 		 * Attempt to look up the page.
1521 		 *
1522 		 *	Allocate if not found, wait and loop if busy, then
1523 		 *	wire the page.  splvm() protection is required to
1524 		 *	maintain the object association (an interrupt can
1525 		 *	free the page) through to the vm_page_wire() call.
1526 		 */
1527 		s = splvm();
1528 		pg = vm_page_lookup(obj, pindex);
1529 		if (pg == NULL) {
1530 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
1531 			if (pg == NULL) {
1532 				vm_wait();
1533 				splx(s);
1534 				goto retry_lookup;
1535 			}
1536 			vm_page_wakeup(pg);
1537 		} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
1538 			splx(s);
1539 			goto retry_lookup;
1540 		}
1541 		vm_page_wire(pg);
1542 		splx(s);
1543 
1544 		/*
1545 		 * If page is not valid for what we need, initiate I/O
1546 		 */
1547 
1548 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1549 			struct uio auio;
1550 			struct iovec aiov;
1551 			int bsize;
1552 
1553 			/*
1554 			 * Ensure that our page is still around when the I/O
1555 			 * completes.
1556 			 */
1557 			vm_page_io_start(pg);
1558 
1559 			/*
1560 			 * Get the page from backing store.
1561 			 */
1562 			bsize = vp->v_mount->mnt_stat.f_iosize;
1563 			auio.uio_iov = &aiov;
1564 			auio.uio_iovcnt = 1;
1565 			aiov.iov_base = 0;
1566 			aiov.iov_len = MAXBSIZE;
1567 			auio.uio_resid = MAXBSIZE;
1568 			auio.uio_offset = trunc_page(off);
1569 			auio.uio_segflg = UIO_NOCOPY;
1570 			auio.uio_rw = UIO_READ;
1571 			auio.uio_td = td;
1572 			vn_lock(vp, NULL, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
1573 			error = VOP_READ(vp, &auio,
1574 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1575 				    p->p_ucred);
1576 			VOP_UNLOCK(vp, NULL, 0, td);
1577 			vm_page_flag_clear(pg, PG_ZERO);
1578 			vm_page_io_finish(pg);
1579 			if (error) {
1580 				vm_page_unwire(pg, 0);
1581 				/*
1582 				 * See if anyone else might know about this page.
1583 				 * If not and it is not valid, then free it.
1584 				 */
1585 				if (pg->wire_count == 0 && pg->valid == 0 &&
1586 				    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
1587 				    pg->hold_count == 0) {
1588 					vm_page_busy(pg);
1589 					vm_page_free(pg);
1590 				}
1591 				sbunlock(&so->so_snd);
1592 				goto done;
1593 			}
1594 		}
1595 
1596 
1597 		/*
1598 		 * Get a sendfile buf. We usually wait as long as necessary,
1599 		 * but this wait can be interrupted.
1600 		 */
1601 		if ((sf = sf_buf_alloc(pg, SFBA_PCATCH)) == NULL) {
1602 			s = splvm();
1603 			vm_page_unwire(pg, 0);
1604 			if (pg->wire_count == 0 && pg->object == NULL)
1605 				vm_page_free(pg);
1606 			splx(s);
1607 			sbunlock(&so->so_snd);
1608 			error = EINTR;
1609 			goto done;
1610 		}
1611 
1612 		/*
1613 		 * Get an mbuf header and set it up as having external storage.
1614 		 */
1615 		MGETHDR(m, MB_WAIT, MT_DATA);
1616 		if (m == NULL) {
1617 			error = ENOBUFS;
1618 			sf_buf_free(sf);
1619 			sbunlock(&so->so_snd);
1620 			goto done;
1621 		}
1622 
1623 		/*
1624 		 * sfm is a temporary hack, use a per-cpu cache for this.
1625 		 */
1626 		sfm = malloc(sizeof(struct sfbuf_mref), M_SENDFILE, M_WAITOK);
1627 		sfm->sf = sf;
1628 		sfm->mref_count = 1;
1629 
1630 		m->m_ext.ext_nfree.new = sf_buf_mfree;
1631 		m->m_ext.ext_nref.new = sf_buf_mref;
1632 		m->m_ext.ext_arg = sfm;
1633 		m->m_ext.ext_buf = (void *)sf->kva;
1634 		m->m_ext.ext_size = PAGE_SIZE;
1635 		m->m_data = (char *) sf->kva + pgoff;
1636 		m->m_flags |= M_EXT;
1637 		m->m_pkthdr.len = m->m_len = xfsize;
1638 		KKASSERT((m->m_flags & (M_EXT_OLD|M_EXT_CLUSTER)) == 0);
1639 
1640 		if (mheader != NULL) {
1641 			hbytes = mheader->m_pkthdr.len;
1642 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1643 			m_cat(mheader, m);
1644 			m = mheader;
1645 			mheader = NULL;
1646 		} else
1647 			hbytes = 0;
1648 
1649 		/*
1650 		 * Add the buffer to the socket buffer chain.
1651 		 */
1652 		s = splnet();
1653 retry_space:
1654 		/*
1655 		 * Make sure that the socket is still able to take more data.
1656 		 * CANTSENDMORE being true usually means that the connection
1657 		 * was closed. so_error is true when an error was sensed after
1658 		 * a previous send.
1659 		 * The state is checked after the page mapping and buffer
1660 		 * allocation above since those operations may block and make
1661 		 * any socket checks stale. From this point forward, nothing
1662 		 * blocks before the pru_send (or more accurately, any blocking
1663 		 * results in a loop back to here to re-check).
1664 		 */
1665 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1666 			if (so->so_state & SS_CANTSENDMORE) {
1667 				error = EPIPE;
1668 			} else {
1669 				error = so->so_error;
1670 				so->so_error = 0;
1671 			}
1672 			m_freem(m);
1673 			sbunlock(&so->so_snd);
1674 			splx(s);
1675 			goto done;
1676 		}
1677 		/*
1678 		 * Wait for socket space to become available. We do this just
1679 		 * after checking the connection state above in order to avoid
1680 		 * a race condition with sbwait().
1681 		 */
1682 		if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) {
1683 			if (so->so_state & SS_NBIO) {
1684 				m_freem(m);
1685 				sbunlock(&so->so_snd);
1686 				splx(s);
1687 				error = EAGAIN;
1688 				goto done;
1689 			}
1690 			error = sbwait(&so->so_snd);
1691 			/*
1692 			 * An error from sbwait usually indicates that we've
1693 			 * been interrupted by a signal. If we've sent anything
1694 			 * then return bytes sent, otherwise return the error.
1695 			 */
1696 			if (error) {
1697 				m_freem(m);
1698 				sbunlock(&so->so_snd);
1699 				splx(s);
1700 				goto done;
1701 			}
1702 			goto retry_space;
1703 		}
1704 		error = so_pru_send(so, 0, m, NULL, NULL, td);
1705 		splx(s);
1706 		if (error) {
1707 			sbunlock(&so->so_snd);
1708 			goto done;
1709 		}
1710 	}
1711 	if (mheader != NULL) {
1712 		*sbytes += mheader->m_pkthdr.len;
1713 		error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1714 		mheader = NULL;
1715 	}
1716 	sbunlock(&so->so_snd);
1717 
1718 done:
1719 	if (fp)
1720 		fdrop(fp, td);
1721 	if (mheader != NULL)
1722 		m_freem(mheader);
1723 	return (error);
1724 }
1725