1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
37  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38  * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.16 2003/09/29 05:34:08 daver Exp $
39  */
40 
41 #include "opt_compat.h"
42 #include "opt_ktrace.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/malloc.h>
49 #include <sys/filedesc.h>
50 #include <sys/event.h>
51 #include <sys/proc.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/filio.h>
55 #include <sys/kern_syscall.h>
56 #include <sys/mbuf.h>
57 #include <sys/protosw.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/signalvar.h>
61 #include <sys/uio.h>
62 #include <sys/vnode.h>
63 #include <sys/lock.h>
64 #include <sys/mount.h>
65 #ifdef KTRACE
66 #include <sys/ktrace.h>
67 #endif
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_extern.h>
74 #include <sys/file2.h>
75 
76 #if defined(COMPAT_43)
77 #include <emulation/43bsd/43bsd_socket.h>
78 #endif /* COMPAT_43 */
79 
80 static void sf_buf_init(void *arg);
81 SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
82 
83 static int do_sendfile(struct sendfile_args *uap, int compat);
84 
85 static SLIST_HEAD(, sf_buf) sf_freelist;
86 static vm_offset_t sf_base;
87 static struct sf_buf *sf_bufs;
88 static int sf_buf_alloc_want;
89 
90 /*
91  * System call interface to the socket abstraction.
92  */
93 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
94 #define COMPAT_OLDSOCK
95 #endif
96 
97 extern	struct fileops socketops;
98 
99 /*
100  * socket_args(int domain, int type, int protocol)
101  */
102 int
103 socket(struct socket_args *uap)
104 {
105 	struct thread *td = curthread;
106 	struct proc *p = td->td_proc;
107 	struct filedesc *fdp;
108 	struct socket *so;
109 	struct file *fp;
110 	int fd, error;
111 
112 	KKASSERT(p);
113 	fdp = p->p_fd;
114 
115 	error = falloc(p, &fp, &fd);
116 	if (error)
117 		return (error);
118 	fhold(fp);
119 	error = socreate(uap->domain, &so, uap->type, uap->protocol, td);
120 	if (error) {
121 		if (fdp->fd_ofiles[fd] == fp) {
122 			fdp->fd_ofiles[fd] = NULL;
123 			fdrop(fp, td);
124 		}
125 	} else {
126 		fp->f_data = (caddr_t)so;
127 		fp->f_flag = FREAD|FWRITE;
128 		fp->f_ops = &socketops;
129 		fp->f_type = DTYPE_SOCKET;
130 		uap->sysmsg_result = fd;
131 	}
132 	fdrop(fp, td);
133 	return (error);
134 }
135 
136 int
137 kern_bind(int s, struct sockaddr *sa)
138 {
139 	struct thread *td = curthread;
140 	struct proc *p = td->td_proc;
141 	struct file *fp;
142 	int error;
143 
144 	KKASSERT(p);
145 	error = holdsock(p->p_fd, s, &fp);
146 	if (error)
147 		return (error);
148 	error = sobind((struct socket *)fp->f_data, sa, td);
149 	fdrop(fp, td);
150 	return (error);
151 }
152 
153 /*
154  * bind_args(int s, caddr_t name, int namelen)
155  */
156 int
157 bind(struct bind_args *uap)
158 {
159 	struct sockaddr *sa;
160 	int error;
161 
162 	error = getsockaddr(&sa, uap->name, uap->namelen);
163 	if (error)
164 		return (error);
165 	error = kern_bind(uap->s, sa);
166 	FREE(sa, M_SONAME);
167 
168 	return (error);
169 }
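
/*
 * Illustrative sketch, not part of the original file: a purely in-kernel
 * caller (for example an emulation layer) can build the sockaddr itself
 * and call kern_bind() directly, skipping the getsockaddr()/copyin step
 * that bind() performs above.  The helper name below is hypothetical.
 */
#if 0	/* example only */
static int
example_kern_bind(int s, const struct sockaddr *src)
{
	struct sockaddr *sa;
	int error;

	/* Copy the caller's kernel sockaddr into an M_SONAME allocation. */
	MALLOC(sa, struct sockaddr *, src->sa_len, M_SONAME, M_WAITOK);
	bcopy(src, sa, src->sa_len);
	error = kern_bind(s, sa);
	FREE(sa, M_SONAME);
	return (error);
}
#endif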
170 
171 int
172 kern_listen(int s, int backlog)
173 {
174 	struct thread *td = curthread;
175 	struct proc *p = td->td_proc;
176 	struct file *fp;
177 	int error;
178 
179 	KKASSERT(p);
180 	error = holdsock(p->p_fd, s, &fp);
181 	if (error)
182 		return (error);
183 	error = solisten((struct socket *)fp->f_data, backlog, td);
184 	fdrop(fp, td);
185 	return(error);
186 }
187 
188 /*
189  * listen_args(int s, int backlog)
190  */
191 int
192 listen(struct listen_args *uap)
193 {
194 	int error;
195 
196 	error = kern_listen(uap->s, uap->backlog);
197 	return (error);
198 }
199 
200 /*
201  * The second argument to kern_accept() is a handle to a struct sockaddr.
202  * This allows kern_accept() to return a pointer to an allocated struct
203  * sockaddr which must be freed later with FREE().  The caller must
204  * initialize *name to NULL.
205  */
206 int
207 kern_accept(int s, struct sockaddr **name, int *namelen, int *res)
208 {
209 	struct thread *td = curthread;
210 	struct proc *p = td->td_proc;
211 	struct filedesc *fdp = p->p_fd;
212 	struct file *lfp = NULL;
213 	struct file *nfp = NULL;
214 	struct sockaddr *sa;
215 	int error, s1;
216 	struct socket *head, *so;
217 	int fd;
218 	u_int fflag;		/* type must match fp->f_flag */
219 	int tmp;
220 
221 	if (name && namelen && *namelen < 0)
222 		return (EINVAL);
223 
224 	error = holdsock(fdp, s, &lfp);
225 	if (error)
226 		return (error);
227 	s1 = splnet();
228 	head = (struct socket *)lfp->f_data;
229 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
230 		splx(s1);
231 		error = EINVAL;
232 		goto done;
233 	}
234 	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
235 		if (head->so_state & SS_CANTRCVMORE) {
236 			head->so_error = ECONNABORTED;
237 			break;
238 		}
239 		if ((head->so_state & SS_NBIO) != 0) {
240 			head->so_error = EWOULDBLOCK;
241 			break;
242 		}
243 		error = tsleep((caddr_t)&head->so_timeo, PCATCH, "accept", 0);
244 		if (error) {
245 			splx(s1);
246 			goto done;
247 		}
248 	}
249 	if (head->so_error) {
250 		error = head->so_error;
251 		head->so_error = 0;
252 		splx(s1);
253 		goto done;
254 	}
255 
256 	/*
257 	 * At this point we know that there is at least one connection
258 	 * ready to be accepted. Remove it from the queue prior to
259 	 * allocating the file descriptor for it since falloc() may
260 	 * block allowing another process to accept the connection
261 	 * instead.
262 	 */
263 	so = TAILQ_FIRST(&head->so_comp);
264 	TAILQ_REMOVE(&head->so_comp, so, so_list);
265 	head->so_qlen--;
266 
267 	fflag = lfp->f_flag;
268 	error = falloc(p, &nfp, &fd);
269 	if (error) {
270 		/*
271 		 * Probably ran out of file descriptors. Put the
272 		 * unaccepted connection back onto the queue and
273 		 * do another wakeup so some other process might
274 		 * have a chance at it.
275 		 */
276 		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
277 		head->so_qlen++;
278 		wakeup_one(&head->so_timeo);
279 		splx(s1);
280 		goto done;
281 	}
282 	fhold(nfp);
283 	*res = fd;
284 
285 	/* connection has been removed from the listen queue */
286 	KNOTE(&head->so_rcv.sb_sel.si_note, 0);
287 
288 	so->so_state &= ~SS_COMP;
289 	so->so_head = NULL;
290 	if (head->so_sigio != NULL)
291 		fsetown(fgetown(head->so_sigio), &so->so_sigio);
292 
293 	nfp->f_data = (caddr_t)so;
294 	nfp->f_flag = fflag;
295 	nfp->f_ops = &socketops;
296 	nfp->f_type = DTYPE_SOCKET;
297 	/* Sync socket nonblocking/async state with file flags */
298 	tmp = fflag & FNONBLOCK;
299 	(void) fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td);
300 	tmp = fflag & FASYNC;
301 	(void) fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td);
302 
303 	sa = NULL;
304 	error = soaccept(so, &sa);
305 
306 	/*
307 	 * Set the returned name and namelen as applicable.  Set the returned
308 	 * namelen to 0 for older code which might ignore the return value
309 	 * from accept.
310 	 */
311 	if (error == 0) {
312 		if (sa && name && namelen) {
313 			if (*namelen > sa->sa_len)
314 				*namelen = sa->sa_len;
315 			*name = sa;
316 		} else {
317 			if (sa)
318 				FREE(sa, M_SONAME);
319 		}
320 	}
321 
322 	/*
323 	 * close the new descriptor, assuming someone hasn't ripped it
324 	 * out from under us.  Note that *res is normally ignored if an
325 	 * error is returned but a syscall message will still have access
326 	 * to the result code.
327 	 */
328 	if (error) {
329 		*res = -1;
330 		if (fdp->fd_ofiles[fd] == nfp) {
331 			fdp->fd_ofiles[fd] = NULL;
332 			fdrop(nfp, td);
333 		}
334 	}
335 	splx(s1);
336 
337 	/*
338 	 * Release explicitly held references before returning.
339 	 */
340 done:
341 	if (nfp != NULL)
342 		fdrop(nfp, td);
343 	fdrop(lfp, td);
344 	return (error);
345 }
346 
347 /*
348  * accept_args(int s, caddr_t name, int *anamelen)
349  */
350 int
351 accept(struct accept_args *uap)
352 {
353 	struct sockaddr *sa = NULL;
354 	int sa_len;
355 	int error;
356 
357 	if (uap->name) {
358 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
359 		if (error)
360 			return (error);
361 
362 		error = kern_accept(uap->s, &sa, &sa_len, &uap->sysmsg_result);
363 
364 		if (error == 0)
365 			error = copyout(sa, uap->name, sa_len);
366 		if (error == 0) {
367 			error = copyout(&sa_len, uap->anamelen,
368 			    sizeof(*uap->anamelen));
369 		}
370 		if (sa)
371 			FREE(sa, M_SONAME);
372 	} else {
373 		error = kern_accept(uap->s, NULL, 0, &uap->sysmsg_result);
374 	}
375 	return (error);
376 }
377 
378 int
379 kern_connect(int s, struct sockaddr *sa)
380 {
381 	struct thread *td = curthread;
382 	struct proc *p = td->td_proc;
383 	struct file *fp;
384 	struct socket *so;
385 	int error;
386 
387 	error = holdsock(p->p_fd, s, &fp);
388 	if (error)
389 		return (error);
390 	so = (struct socket *)fp->f_data;
391 	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
392 		error = EALREADY;
393 		goto done;
394 	}
395 	error = soconnect(so, sa, td);
396 	if (error)
397 		goto bad;
398 	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
399 		error = EINPROGRESS;
400 		goto done;
401 	}
402 	s = splnet();
403 	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
404 		error = tsleep((caddr_t)&so->so_timeo, PCATCH, "connec", 0);
405 		if (error)
406 			break;
407 	}
408 	if (error == 0) {
409 		error = so->so_error;
410 		so->so_error = 0;
411 	}
412 	splx(s);
413 bad:
414 	so->so_state &= ~SS_ISCONNECTING;
415 	if (error == ERESTART)
416 		error = EINTR;
417 done:
418 	fdrop(fp, td);
419 	return (error);
420 }
421 
422 /*
423  * connect_args(int s, caddr_t name, int namelen)
424  */
425 int
426 connect(struct connect_args *uap)
427 {
428 	struct sockaddr *sa;
429 	int error;
430 
431 	error = getsockaddr(&sa, uap->name, uap->namelen);
432 	if (error)
433 		return (error);
434 	error = kern_connect(uap->s, sa);
435 	FREE(sa, M_SONAME);
436 
437 	return (error);
438 }
439 
440 int
441 kern_socketpair(int domain, int type, int protocol, int *sv)
442 {
443 	struct thread *td = curthread;
444 	struct proc *p = td->td_proc;
445 	struct filedesc *fdp;
446 	struct file *fp1, *fp2;
447 	struct socket *so1, *so2;
448 	int fd, error;
449 
450 	KKASSERT(p);
451 	fdp = p->p_fd;
452 	error = socreate(domain, &so1, type, protocol, td);
453 	if (error)
454 		return (error);
455 	error = socreate(domain, &so2, type, protocol, td);
456 	if (error)
457 		goto free1;
458 	error = falloc(p, &fp1, &fd);
459 	if (error)
460 		goto free2;
461 	fhold(fp1);
462 	sv[0] = fd;
463 	fp1->f_data = (caddr_t)so1;
464 	error = falloc(p, &fp2, &fd);
465 	if (error)
466 		goto free3;
467 	fhold(fp2);
468 	fp2->f_data = (caddr_t)so2;
469 	sv[1] = fd;
470 	error = soconnect2(so1, so2);
471 	if (error)
472 		goto free4;
473 	if (type == SOCK_DGRAM) {
474 		/*
475 		 * Datagram socket connection is asymmetric.
476 		 */
477 		 error = soconnect2(so2, so1);
478 		 if (error)
479 			goto free4;
480 	}
481 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
482 	fp1->f_ops = fp2->f_ops = &socketops;
483 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
484 	fdrop(fp1, td);
485 	fdrop(fp2, td);
486 	return (error);
487 free4:
488 	if (fdp->fd_ofiles[sv[1]] == fp2) {
489 		fdp->fd_ofiles[sv[1]] = NULL;
490 		fdrop(fp2, td);
491 	}
492 	fdrop(fp2, td);
493 free3:
494 	if (fdp->fd_ofiles[sv[0]] == fp1) {
495 		fdp->fd_ofiles[sv[0]] = NULL;
496 		fdrop(fp1, td);
497 	}
498 	fdrop(fp1, td);
499 free2:
500 	(void)soclose(so2);
501 free1:
502 	(void)soclose(so1);
503 	return (error);
504 }
505 
506 /*
507  * socketpair(int domain, int type, int protocol, int *rsv)
508  */
509 int
510 socketpair(struct socketpair_args *uap)
511 {
512 	int error, sockv[2];
513 
514 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
515 
516 	if (error == 0)
517 		error = copyout(sockv, uap->rsv, sizeof(sockv));
518 	return (error);
519 }
520 
521 /*
522  * This function never touches mp->msg_namelen.
523  */
524 int
525 kern_sendmsg(int s, struct msghdr *mp, int *res)
526 {
527 	struct thread *td = curthread;
528 	struct proc *p = td->td_proc;
529 	struct file *fp;
530 	struct uio auio;
531 	struct iovec *iov;
532 	int i;
533 	int len, error;
534 	struct socket *so;
535 #ifdef KTRACE
536 	struct iovec *ktriov = NULL;
537 	struct uio ktruio;
538 #endif
539 
540 	error = holdsock(p->p_fd, s, &fp);
541 	if (error)
542 		return (error);
543 	auio.uio_iov = mp->msg_iov;
544 	auio.uio_iovcnt = mp->msg_iovlen;
545 	auio.uio_segflg = UIO_USERSPACE;
546 	auio.uio_rw = UIO_WRITE;
547 	auio.uio_td = td;
548 	auio.uio_offset = 0;			/* XXX */
549 	auio.uio_resid = 0;
550 	iov = mp->msg_iov;
551 	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
552 		if ((auio.uio_resid += iov->iov_len) < 0) {
553 			fdrop(fp, td);
554 			return (EINVAL);
555 		}
556 	}
557 #ifdef KTRACE
558 	if (KTRPOINT(td, KTR_GENIO)) {
559 		int iovlen = auio.uio_iovcnt * sizeof (struct iovec);
560 
561 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
562 		bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
563 		ktruio = auio;
564 	}
565 #endif
566 	len = auio.uio_resid;
567 	so = (struct socket *)fp->f_data;
568 	error = so->so_proto->pr_usrreqs->pru_sosend(so, mp->msg_name,
569 	    &auio, NULL, mp->msg_control, mp->msg_flags, td);
570 	if (error) {
571 		if (auio.uio_resid != len && (error == ERESTART ||
572 		    error == EINTR || error == EWOULDBLOCK))
573 			error = 0;
574 		if (error == EPIPE)
575 			psignal(p, SIGPIPE);
576 	}
577 #ifdef KTRACE
578 	if (ktriov != NULL) {
579 		if (error == 0) {
580 			ktruio.uio_iov = ktriov;
581 			ktruio.uio_resid = len - auio.uio_resid;
582 			ktrgenio(p->p_tracep, s, UIO_WRITE, &ktruio, error);
583 		}
584 		FREE(ktriov, M_TEMP);
585 	}
586 #endif
587 	if (error == 0)
588 		*res = len - auio.uio_resid;
589 	fdrop(fp, td);
590 	return (error);
591 }
592 
593 /*
594  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
595  */
596 int
597 sendto(struct sendto_args *uap)
598 {
599 	struct msghdr msg;
600 	struct iovec aiov;
601 	struct sockaddr *sa = NULL;
602 	int error;
603 
604 	if (uap->to) {
605 		error = getsockaddr(&sa, uap->to, uap->tolen);
606 		if (error)
607 			return (error);
608 		msg.msg_name = sa;
609 	} else {
610 		msg.msg_name = NULL;
611 	}
612 	/* msg.msg_namelen is ignored by kern_sendmsg() */
613 	msg.msg_iov = &aiov;
614 	msg.msg_iovlen = 1;
615 	msg.msg_control = NULL;
616 	msg.msg_flags = uap->flags;
617 	aiov.iov_base = uap->buf;
618 	aiov.iov_len = uap->len;
619 
620 	error = kern_sendmsg(uap->s, &msg, &uap->sysmsg_result);
621 
622 	if (sa)
623 		FREE(sa, M_SONAME);
624 	return (error);
625 }
626 
627 /*
628  * sendmsg_args(int s, caddr_t msg, int flags)
629  *
630  * We must copyin the msghdr and copyin a bunch of its fields.  We
631  * explicitly copyin msg.msg_iov and conditionally copyin the
632  * msg.msg_control and msg.msg_name fields.
633  */
634 int
635 sendmsg(struct sendmsg_args *uap)
636 {
637 	struct msghdr msg;
638 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
639 	struct sockaddr *sa = NULL;
640 	struct mbuf *control = NULL;
641 	int error;
642 
643 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
644 	if (error)
645 		return (error);
646 
647 	/*
648 	 * Conditionally copyin msg.msg_name.
649 	 */
650 	if (msg.msg_name) {
651 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
652 		if (error)
653 			return (error);
654 		msg.msg_name = sa;
655 	}
656 
657 	/*
658 	 * We always copyin msg.msg_iov.
659 	 */
660 	if (msg.msg_iovlen >= UIO_MAXIOV) {
661 		error = EMSGSIZE;
662 		goto cleanup;
663 	}
664 	if (msg.msg_iovlen >= UIO_SMALLIOV) {
665 		MALLOC(iov, struct iovec *,
666 		    sizeof(struct iovec) * msg.msg_iovlen, M_IOV, M_WAITOK);
667 	} else {
668 		iov = aiov;
669 	}
670 	error = copyin(msg.msg_iov, iov,
671 	    msg.msg_iovlen * sizeof(struct iovec));
672 	if (error)
673 		goto cleanup;
674 	msg.msg_iov = iov;
675 
676 	/*
677 	 * Conditionally copyin msg.msg_control.
678 	 */
679 	if (msg.msg_control) {
680 		if (msg.msg_controllen < sizeof(struct cmsghdr)) {
681 			error = EINVAL;
682 			goto cleanup;
683 		}
684 		error = sockargs(&control, msg.msg_control,
685 		    msg.msg_controllen, MT_CONTROL);
686 		if (error)
687 			goto cleanup;
688 		msg.msg_control = control;
689 	}
690 
691 	/* Don't forget the flags. */
692 	msg.msg_flags = uap->flags;
693 
694 	error = kern_sendmsg(uap->s, &msg, &uap->sysmsg_result);
695 
696 cleanup:
697 	if (sa)
698 		FREE(sa, M_SONAME);
699 	if (iov != aiov)
700 		FREE(iov, M_IOV);
701 	return (error);
702 }
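
/*
 * Illustrative userland sketch (not part of this file) of the msg_control
 * path handled above: passing a descriptor over a unix domain socket with
 * SCM_RIGHTS.  Note that msg_controllen must be at least
 * sizeof(struct cmsghdr) or sendmsg() fails with EINVAL.  's' and
 * 'fd_to_pass' are assumed to be a connected unix domain socket and an
 * open descriptor.
 *
 *	union {
 *		struct cmsghdr cm;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} cmsg;
 *	struct msghdr msg;
 *	struct iovec iov;
 *	char c = 0;
 *
 *	bzero(&msg, sizeof(msg));
 *	iov.iov_base = &c;
 *	iov.iov_len = 1;
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cmsg.buf;
 *	msg.msg_controllen = sizeof(cmsg.buf);
 *	cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int));
 *	cmsg.cm.cmsg_level = SOL_SOCKET;
 *	cmsg.cm.cmsg_type = SCM_RIGHTS;
 *	*(int *)CMSG_DATA(&cmsg.cm) = fd_to_pass;
 *	if (sendmsg(s, &msg, 0) < 0)
 *		err(1, "sendmsg");
 */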
703 
704 /*
705  * If mp->msg_namelen is non-zero, then when we return mp->msg_name is
706  * a pointer to the address that we received from.  If it is zero,
707  * the address is freed before return.  Don't forget to FREE()
708  * mp->msg_name if mp->msg_namelen is non-zero.
709  */
710 int
711 kern_recvmsg(int s, struct msghdr *mp, int *res)
712 {
713 	struct thread *td = curthread;
714 	struct proc *p = td->td_proc;
715 	struct file *fp;
716 	struct uio auio;
717 	struct iovec *iov;
718 	int i;
719 	int len, error;
720 	struct socket *so;
721 #ifdef KTRACE
722 	struct iovec *ktriov = NULL;
723 	struct uio ktruio;
724 #endif
725 
726 	error = holdsock(p->p_fd, s, &fp);
727 	if (error)
728 		return (error);
729 	auio.uio_iov = mp->msg_iov;
730 	auio.uio_iovcnt = mp->msg_iovlen;
731 	auio.uio_segflg = UIO_USERSPACE;
732 	auio.uio_rw = UIO_READ;
733 	auio.uio_td = td;
734 	auio.uio_offset = 0;			/* XXX */
735 	auio.uio_resid = 0;
736 	iov = mp->msg_iov;
737 	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
738 		if ((auio.uio_resid += iov->iov_len) < 0) {
739 			fdrop(fp, td);
740 			return (EINVAL);
741 		}
742 	}
743 #ifdef KTRACE
744 	if (KTRPOINT(td, KTR_GENIO)) {
745 		int iovlen = auio.uio_iovcnt * sizeof (struct iovec);
746 
747 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
748 		bcopy(auio.uio_iov, ktriov, iovlen);
749 		ktruio = auio;
750 	}
751 #endif
752 	len = auio.uio_resid;
753 	so = (struct socket *)fp->f_data;
754 	error = so->so_proto->pr_usrreqs->pru_soreceive(so,
755 	    mp->msg_namelen ? (struct sockaddr **)&mp->msg_name : NULL,
756 	    &auio, NULL,
757 	    mp->msg_controllen ? (struct mbuf **)&mp->msg_control : NULL,
758 	    &mp->msg_flags);
759 	if (error) {
760 		if (auio.uio_resid != len && (error == ERESTART ||
761 		    error == EINTR || error == EWOULDBLOCK))
762 			error = 0;
763 	}
764 #ifdef KTRACE
765 	if (ktriov != NULL) {
766 		if (error == 0) {
767 			ktruio.uio_iov = ktriov;
768 			ktruio.uio_resid = len - auio.uio_resid;
769 			ktrgenio(p->p_tracep, s, UIO_READ, &ktruio, error);
770 		}
771 		FREE(ktriov, M_TEMP);
772 	}
773 #endif
774 	if (error == 0)
775 		*res = len - auio.uio_resid;
776 	fdrop(fp, td);
777 	return (error);
778 }
779 
780 /*
781  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
782  *			caddr_t from, int *fromlenaddr)
783  */
784 int
785 recvfrom(struct recvfrom_args *uap)
786 {
787 	struct msghdr msg;
788 	struct iovec aiov;
789 	int error, fromlen;
790 
791 	if (uap->fromlenaddr) {
792 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
793 		if (error)
794 			return (error);
795 	} else {
796 		fromlen = 0;
797 	}
798 
799 	msg.msg_name = NULL;
800 	msg.msg_namelen = fromlen;
801 	msg.msg_iov = &aiov;
802 	msg.msg_iovlen = 1;
803 	msg.msg_control = NULL;
804 	msg.msg_flags = uap->flags;
805 	aiov.iov_base = uap->buf;
806 	aiov.iov_len = uap->len;
807 
808 	error = kern_recvmsg(uap->s, &msg, &uap->sysmsg_result);
809 
810 	fromlen = MIN(msg.msg_namelen, fromlen);
811 	if (uap->from) {
812 		if (error == 0)
813 			error = copyout(msg.msg_name, uap->from, fromlen);
814 		if (error == 0)
815 			error = copyout(&fromlen, uap->fromlenaddr,
816 			    sizeof(fromlen));
817 	}
818 	if (msg.msg_name)
819 		FREE(msg.msg_name, M_SONAME);
820 
821 	return (error);
822 }
823 
824 /*
825  * recvmsg_args(int s, struct msghdr *msg, int flags)
826  */
827 int
828 recvmsg(struct recvmsg_args *uap)
829 {
830 	struct msghdr msg;
831 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
832 	struct mbuf *m, *ucontrol;
833 	struct sockaddr *uname;
834 	caddr_t ctlbuf;
835 	socklen_t *unamelenp, *ucontrollenp;
836 	int error, fromlen, len;
837 
838 	/*
839 	 * This copyin handles everything except the iovec.
840 	 */
841 	error = copyin(uap->msg, &msg, sizeof(msg));
842 	if (error)
843 		return (error);
844 
845 	/*
846 	 * Save some userland pointers for the copyouts.
847 	 */
848 	uname = msg.msg_name;
849 	unamelenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
850 	    msg_namelen));
851 	ucontrol = msg.msg_control;
852 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
853 	    msg_controllen));
854 
855 	fromlen = msg.msg_namelen;
856 
857 	/*
858 	 * Copyin msg.msg_iov.
859 	 */
860 	if (msg.msg_iovlen >= UIO_MAXIOV)
861 		return (EMSGSIZE);
862 	if (msg.msg_iovlen >= UIO_SMALLIOV) {
863 		MALLOC(iov, struct iovec *,
864 		    sizeof(struct iovec) * msg.msg_iovlen, M_IOV, M_WAITOK);
865 	} else {
866 		iov = aiov;
867 	}
868 	error = copyin(msg.msg_iov, iov, msg.msg_iovlen * sizeof(struct iovec));
869 	if (error)
870 		goto cleanup;
871 	msg.msg_iov = iov;
872 
873 	/* Don't forget the flags. */
874 	msg.msg_flags = uap->flags;
875 
876 	error = kern_recvmsg(uap->s, &msg, &uap->sysmsg_result);
877 
878 	/*
879 	 * Copyout msg.msg_name and msg.msg_namelen.
880 	 */
881 	if (error == 0 && uname) {
882 		fromlen = MIN(msg.msg_namelen, fromlen);
883 		error = copyout(msg.msg_name, uname, fromlen);
884 		if (error == 0)
885 			error = copyout(&fromlen, unamelenp,
886 			    sizeof(*unamelenp));
887 	}
888 
889 	/*
890 	 * Copyout msg.msg_control and msg.msg_controllen.
891 	 */
892 	if (error == 0 && ucontrol) {
893 		len = msg.msg_controllen;
894 		msg.msg_controllen = 0;
895 		m = msg.msg_control;
896 		ctlbuf = (caddr_t)ucontrol;
897 
898 		while (m && len > 0) {
899 			unsigned int tocopy;
900 
901 			if (len >= m->m_len) {
902 				tocopy = m->m_len;
903 			} else {
904 				msg.msg_flags |= MSG_CTRUNC;
905 				tocopy = len;
906 			}
907 
908 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
909 			if (error)
910 				goto cleanup;
911 
912 			ctlbuf += tocopy;
913 			len -= tocopy;
914 			m = m->m_next;
915 		}
916 		msg.msg_controllen = ctlbuf - (caddr_t)ucontrol;
917 		error = copyout(&msg.msg_controllen, ucontrollenp,
918 		    sizeof(*ucontrollenp));
919 	}
920 
921 cleanup:
922 	if (msg.msg_name)
923 		FREE(msg.msg_name, M_SONAME);
924 	if (iov != aiov)
925 		FREE(iov, M_IOV);
926 	if (msg.msg_control)
927 		m_freem(msg.msg_control);
928 	return (error);
929 }
930 
931 /*
932  * shutdown_args(int s, int how)
933  */
934 /* ARGSUSED */
935 int
936 shutdown(struct shutdown_args *uap)
937 {
938 	struct thread *td = curthread;
939 	struct proc *p = td->td_proc;
940 	struct file *fp;
941 	int error;
942 
943 	KKASSERT(p);
944 	error = holdsock(p->p_fd, uap->s, &fp);
945 	if (error)
946 		return (error);
947 	error = soshutdown((struct socket *)fp->f_data, uap->how);
948 	fdrop(fp, td);
949 	return(error);
950 }
951 
952 /*
953  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
954  * in-kernel pointer instead of a userland pointer.  This allows us
955  * to manipulate socket options in the emulation code.
956  */
957 int
958 kern_setsockopt(int s, struct sockopt *sopt)
959 {
960 	struct thread *td = curthread;
961 	struct proc *p = td->td_proc;
962 	struct file *fp;
963 	int error;
964 
965 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
966 		return (EFAULT);
967 	if (sopt->sopt_valsize < 0)
968 		return (EINVAL);
969 
970 	error = holdsock(p->p_fd, s, &fp);
971 	if (error)
972 		return (error);
973 
974 	error = sosetopt((struct socket *)fp->f_data, sopt);
975 	fdrop(fp, td);
976 	return (error);
977 }
978 
979 /*
980  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
981  */
982 int
983 setsockopt(struct setsockopt_args *uap)
984 {
985 	struct thread *td = curthread;
986 	struct sockopt sopt;
987 	int error;
988 
989 	sopt.sopt_dir = SOPT_SET;
990 	sopt.sopt_level = uap->level;
991 	sopt.sopt_name = uap->name;
992 	sopt.sopt_val = uap->val;
993 	sopt.sopt_valsize = uap->valsize;
994 	sopt.sopt_td = td;
995 
996 	error = kern_setsockopt(uap->s, &sopt);
997 	return(error);
998 }
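
/*
 * Illustrative sketch, not part of the original file: setting a socket
 * option from kernel code.  With sopt_td == NULL the option value is
 * taken from a kernel pointer, as described above kern_setsockopt(), so
 * no copyin of the value occurs.  The helper name is hypothetical.
 */
#if 0	/* example only */
static int
example_set_reuseaddr(int s)
{
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = (caddr_t)&on;	/* kernel address, not a user address */
	sopt.sopt_valsize = sizeof(on);
	sopt.sopt_td = NULL;		/* value lives in the kernel */
	return (kern_setsockopt(s, &sopt));
}
#endif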
999 
1000 /*
1001  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1002  * in-kernel pointer instead of a userland pointer.  This allows us
1003  * to manipulate socket options in the emulation code.
1004  */
1005 int
1006 kern_getsockopt(int s, struct sockopt *sopt)
1007 {
1008 	struct thread *td = curthread;
1009 	struct proc *p = td->td_proc;
1010 	struct file *fp;
1011 	int error;
1012 
1013 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1014 		return (EFAULT);
1015 	if (sopt->sopt_valsize < 0)
1016 		return (EINVAL);
1017 
1018 	error = holdsock(p->p_fd, s, &fp);
1019 	if (error)
1020 		return (error);
1021 
1022 	error = sogetopt((struct socket *)fp->f_data, sopt);
1023 	fdrop(fp, td);
1024 	return (error);
1025 }
1026 
1027 /*
1028  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1029  */
1030 int
1031 getsockopt(struct getsockopt_args *uap)
1032 {
1033 	struct thread *td = curthread;
1034 	struct	sockopt sopt;
1035 	int	error, valsize;
1036 
1037 	if (uap->val) {
1038 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1039 		if (error)
1040 			return (error);
1041 		if (valsize < 0)
1042 			return (EINVAL);
1043 	} else {
1044 		valsize = 0;
1045 	}
1046 
1047 	sopt.sopt_dir = SOPT_GET;
1048 	sopt.sopt_level = uap->level;
1049 	sopt.sopt_name = uap->name;
1050 	sopt.sopt_val = uap->val;
1051 	sopt.sopt_valsize = valsize;
1052 	sopt.sopt_td = td;
1053 
1054 	error = kern_getsockopt(uap->s, &sopt);
1055 	if (error == 0) {
1056 		valsize = sopt.sopt_valsize;
1057 		error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1058 	}
1059 	return (error);
1060 }
1061 
1062 /*
1063  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1064  * This allows kern_getsockname() to return a pointer to an allocated struct
1065  * sockaddr which must be freed later with FREE().  The caller must
1066  * initialize *name to NULL.
1067  */
1068 int
1069 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1070 {
1071 	struct thread *td = curthread;
1072 	struct proc *p = td->td_proc;
1073 	struct file *fp;
1074 	struct socket *so;
1075 	struct sockaddr *sa = NULL;
1076 	int error;
1077 
1078 	error = holdsock(p->p_fd, s, &fp);
1079 	if (error)
1080 		return (error);
1081 	if (*namelen < 0) {
1082 		fdrop(fp, td);
1083 		return (EINVAL);
1084 	}
1085 	so = (struct socket *)fp->f_data;
1086 	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa);
1087 	if (error == 0) {
1088 		if (sa == 0) {
1089 			*namelen = 0;
1090 		} else {
1091 			*namelen = MIN(*namelen, sa->sa_len);
1092 			*name = sa;
1093 		}
1094 	}
1095 
1096 	fdrop(fp, td);
1097 	return (error);
1098 }
1099 
1100 /*
1101  * getsockname_args(int fdes, caddr_t asa, int *alen)
1102  *
1103  * Get socket name.
1104  */
1105 int
1106 getsockname(struct getsockname_args *uap)
1107 {
1108 	struct sockaddr *sa = NULL;
1109 	int error, sa_len;
1110 
1111 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1112 	if (error)
1113 		return (error);
1114 
1115 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1116 
1117 	if (error == 0)
1118 		error = copyout(sa, uap->asa, sa_len);
1119 	if (error == 0)
1120 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1121 	if (sa)
1122 		FREE(sa, M_SONAME);
1123 	return (error);
1124 }
1125 
1126 /*
1127  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1128  * This allows kern_getpeername() to return a pointer to an allocated struct
1129  * sockaddr which must be freed later with FREE().  The caller must
1130  * initialize *name to NULL.
1131  */
1132 int
1133 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1134 {
1135 	struct thread *td = curthread;
1136 	struct proc *p = td->td_proc;
1137 	struct file *fp;
1138 	struct socket *so;
1139 	struct sockaddr *sa = NULL;
1140 	int error;
1141 
1142 	error = holdsock(p->p_fd, s, &fp);
1143 	if (error)
1144 		return (error);
1145 	if (*namelen < 0) {
1146 		fdrop(fp, td);
1147 		return (EINVAL);
1148 	}
1149 	so = (struct socket *)fp->f_data;
1150 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1151 		fdrop(fp, td);
1152 		return (ENOTCONN);
1153 	}
1154 	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa);
1155 	if (error == 0) {
1156 		if (sa == 0) {
1157 			*namelen = 0;
1158 		} else {
1159 			*namelen = MIN(*namelen, sa->sa_len);
1160 			*name = sa;
1161 		}
1162 	}
1163 
1164 	fdrop(fp, td);
1165 	return (error);
1166 }
1167 
1168 /*
1169  * getpeername_args(int fdes, caddr_t asa, int *alen)
1170  *
1171  * Get name of peer for connected socket.
1172  */
1173 int
1174 getpeername(struct getpeername_args *uap)
1175 {
1176 	struct sockaddr *sa = NULL;
1177 	int error, sa_len;
1178 
1179 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1180 	if (error)
1181 		return (error);
1182 
1183 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1184 
1185 	if (error == 0)
1186 		error = copyout(sa, uap->asa, sa_len);
1187 	if (error == 0)
1188 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1189 	if (sa)
1190 		FREE(sa, M_SONAME);
1191 	return (error);
1192 }
1193 
1194 int
1195 sockargs(mp, buf, buflen, type)
1196 	struct mbuf **mp;
1197 	caddr_t buf;
1198 	int buflen, type;
1199 {
1200 	struct sockaddr *sa;
1201 	struct mbuf *m;
1202 	int error;
1203 
1204 	if ((u_int)buflen > MLEN) {
1205 #ifdef COMPAT_OLDSOCK
1206 		if (type == MT_SONAME && (u_int)buflen <= 112)
1207 			buflen = MLEN;		/* unix domain compat. hack */
1208 		else
1209 #endif
1210 		return (EINVAL);
1211 	}
1212 	m = m_get(M_WAIT, type);
1213 	if (m == NULL)
1214 		return (ENOBUFS);
1215 	m->m_len = buflen;
1216 	error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
1217 	if (error)
1218 		(void) m_free(m);
1219 	else {
1220 		*mp = m;
1221 		if (type == MT_SONAME) {
1222 			sa = mtod(m, struct sockaddr *);
1223 
1224 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1225 			if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1226 				sa->sa_family = sa->sa_len;
1227 #endif
1228 			sa->sa_len = buflen;
1229 		}
1230 	}
1231 	return (error);
1232 }
1233 
1234 int
1235 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1236 {
1237 	struct sockaddr *sa;
1238 	int error;
1239 
1240 	*namp = NULL;
1241 	if (len > SOCK_MAXADDRLEN)
1242 		return ENAMETOOLONG;
1243 	if (len < offsetof(struct sockaddr, sa_data[0]))
1244 		return EDOM;
1245 	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1246 	error = copyin(uaddr, sa, len);
1247 	if (error) {
1248 		FREE(sa, M_SONAME);
1249 	} else {
1250 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
1251 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1252 			sa->sa_family = sa->sa_len;
1253 #endif
1254 		sa->sa_len = len;
1255 		*namp = sa;
1256 	}
1257 	return error;
1258 }
1259 
1260 /*
1261  * holdsock() - load the struct file pointer associated
1262  * with a socket into *fpp.  If an error occurs, non-zero
1263  * will be returned and *fpp will be set to NULL.
1264  */
1265 int
1266 holdsock(fdp, fdes, fpp)
1267 	struct filedesc *fdp;
1268 	int fdes;
1269 	struct file **fpp;
1270 {
1271 	struct file *fp = NULL;
1272 	int error = 0;
1273 
1274 	if ((unsigned)fdes >= fdp->fd_nfiles ||
1275 	    (fp = fdp->fd_ofiles[fdes]) == NULL) {
1276 		error = EBADF;
1277 	} else if (fp->f_type != DTYPE_SOCKET) {
1278 		error = ENOTSOCK;
1279 		fp = NULL;
1280 	} else {
1281 		fhold(fp);
1282 	}
1283 	*fpp = fp;
1284 	return(error);
1285 }
1286 
1287 /*
1288  * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
1289  */
1290 static void
1291 sf_buf_init(void *arg)
1292 {
1293 	int i;
1294 
1295 	SLIST_INIT(&sf_freelist);
1296 	sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
1297 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT);
1298 	bzero(sf_bufs, nsfbufs * sizeof(struct sf_buf));
1299 	for (i = 0; i < nsfbufs; i++) {
1300 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
1301 		SLIST_INSERT_HEAD(&sf_freelist, &sf_bufs[i], free_list);
1302 	}
1303 }
1304 
1305 /*
1306  * Get an sf_buf from the freelist. Will block if none are available.
1307  */
1308 struct sf_buf *
1309 sf_buf_alloc()
1310 {
1311 	struct sf_buf *sf;
1312 	int s;
1313 	int error;
1314 
1315 	s = splimp();
1316 	while ((sf = SLIST_FIRST(&sf_freelist)) == NULL) {
1317 		sf_buf_alloc_want = 1;
1318 		error = tsleep(&sf_freelist, PCATCH, "sfbufa", 0);
1319 		if (error)
1320 			break;
1321 	}
1322 	if (sf != NULL) {
1323 		SLIST_REMOVE_HEAD(&sf_freelist, free_list);
1324 		sf->refcnt = 1;
1325 	}
1326 	splx(s);
1327 	return (sf);
1328 }
1329 
1330 #define dtosf(x)	(&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])
1331 void
1332 sf_buf_ref(caddr_t addr, u_int size)
1333 {
1334 	struct sf_buf *sf;
1335 
1336 	sf = dtosf(addr);
1337 	if (sf->refcnt == 0)
1338 		panic("sf_buf_ref: referencing a free sf_buf");
1339 	sf->refcnt++;
1340 }
1341 
1342 /*
1343  * Lose a reference to an sf_buf. When none left, detach mapped page
1344  * and release resources back to the system.
1345  *
1346  * Must be called at splimp.
1347  */
1348 void
1349 sf_buf_free(caddr_t addr, u_int size)
1350 {
1351 	struct sf_buf *sf;
1352 	struct vm_page *m;
1353 	int s;
1354 
1355 	sf = dtosf(addr);
1356 	if (sf->refcnt == 0)
1357 		panic("sf_buf_free: freeing free sf_buf");
1358 	sf->refcnt--;
1359 	if (sf->refcnt == 0) {
1360 		pmap_qremove((vm_offset_t)addr, 1);
1361 		m = sf->m;
1362 		s = splvm();
1363 		vm_page_unwire(m, 0);
1364 		/*
1365 		 * Check for the object going away on us. This can
1366 		 * happen since we don't hold a reference to it.
1367 		 * If so, we're responsible for freeing the page.
1368 		 */
1369 		if (m->wire_count == 0 && m->object == NULL)
1370 			vm_page_free(m);
1371 		splx(s);
1372 		sf->m = NULL;
1373 		SLIST_INSERT_HEAD(&sf_freelist, sf, free_list);
1374 		if (sf_buf_alloc_want) {
1375 			sf_buf_alloc_want = 0;
1376 			wakeup(&sf_freelist);
1377 		}
1378 	}
1379 }
1380 
1381 /*
1382  * sendfile(2).
1383  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1384  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1385  *
1386  * Send a file specified by 'fd' and starting at 'offset' to a socket
1387  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1388  * nbytes == 0. Optionally add a header and/or trailer to the socket
1389  * output. If specified, write the total number of bytes sent into *sbytes.
1390  */
1391 int
1392 sendfile(struct sendfile_args *uap)
1393 {
1394 	return (do_sendfile(uap, 0));
1395 }
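
/*
 * Illustrative userland sketch (not part of this file): sending an entire
 * regular file 'fd' over a connected stream socket 'sock' and retrieving
 * the byte count, per the interface described above (nbytes == 0 means
 * send until EOF).
 *
 *	off_t sbytes;
 *
 *	if (sendfile(fd, sock, 0, 0, NULL, &sbytes, 0) < 0)
 *		err(1, "sendfile");
 *	printf("sent %lld bytes\n", (long long)sbytes);
 */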
1396 
1397 #ifdef COMPAT_43
1398 int
1399 osendfile(struct osendfile_args *uap)
1400 {
1401 	struct sendfile_args args;
1402 
1403 	args.fd = uap->fd;
1404 	args.s = uap->s;
1405 	args.offset = uap->offset;
1406 	args.nbytes = uap->nbytes;
1407 	args.hdtr = uap->hdtr;
1408 	args.sbytes = uap->sbytes;
1409 	args.flags = uap->flags;
1410 
1411 	return (do_sendfile(&args, 1));
1412 }
1413 #endif
1414 
1415 int
1416 do_sendfile(struct sendfile_args *uap, int compat)
1417 {
1418 	struct thread *td = curthread;
1419 	struct proc *p = td->td_proc;
1420 	struct file *fp;
1421 	struct filedesc *fdp;
1422 	struct vnode *vp;
1423 	struct vm_object *obj;
1424 	struct socket *so;
1425 	struct mbuf *m;
1426 	struct sf_buf *sf;
1427 	struct vm_page *pg;
1428 	struct writev_args nuap;
1429 	struct sf_hdtr hdtr;
1430 	off_t off, xfsize, hdtr_size, sbytes = 0;
1431 	int error = 0, s;
1432 
1433 	KKASSERT(p);
1434 	fdp = p->p_fd;
1435 
1436 	vp = NULL;
1437 	hdtr_size = 0;
1438 	/*
1439 	 * Do argument checking. Must be a regular file in, stream
1440 	 * type and connected socket out, positive offset.
1441 	 */
1442 	fp = holdfp(fdp, uap->fd, FREAD);
1443 	if (fp == NULL) {
1444 		error = EBADF;
1445 		goto done;
1446 	}
1447 	if (fp->f_type != DTYPE_VNODE) {
1448 		error = EINVAL;
1449 		goto done;
1450 	}
1451 	vp = (struct vnode *)fp->f_data;
1452 	vref(vp);
1453 	if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
1454 		error = EINVAL;
1455 		goto done;
1456 	}
1457 	fdrop(fp, td);
1458 	error = holdsock(p->p_fd, uap->s, &fp);
1459 	if (error)
1460 		goto done;
1461 	so = (struct socket *)fp->f_data;
1462 	if (so->so_type != SOCK_STREAM) {
1463 		error = EINVAL;
1464 		goto done;
1465 	}
1466 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1467 		error = ENOTCONN;
1468 		goto done;
1469 	}
1470 	if (uap->offset < 0) {
1471 		error = EINVAL;
1472 		goto done;
1473 	}
1474 
1475 	/*
1476 	 * If specified, get the pointer to the sf_hdtr struct for
1477 	 * any headers/trailers.
1478 	 */
1479 	if (uap->hdtr != NULL) {
1480 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1481 		if (error)
1482 			goto done;
1483 		/*
1484 		 * Send any headers. Wimp out and use writev(2).
1485 		 */
1486 		if (hdtr.headers != NULL) {
1487 			nuap.fd = uap->s;
1488 			nuap.iovp = hdtr.headers;
1489 			nuap.iovcnt = hdtr.hdr_cnt;
1490 			error = writev(&nuap);
1491 			if (error)
1492 				goto done;
1493 			if (compat)
1494 				sbytes += nuap.sysmsg_result;
1495 			else
1496 				hdtr_size += nuap.sysmsg_result;
1497 		}
1498 	}
1499 
1500 	/*
1501 	 * Protect against multiple writers to the socket.
1502 	 */
1503 	(void) sblock(&so->so_snd, M_WAITOK);
1504 
1505 	/*
1506 	 * Loop through the pages in the file, starting with the requested
1507 	 * offset. Get a file page (do I/O if necessary), map the file page
1508 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1509 	 * it on the socket.
1510 	 */
1511 	for (off = uap->offset; ; off += xfsize, sbytes += xfsize) {
1512 		vm_pindex_t pindex;
1513 		vm_offset_t pgoff;
1514 
1515 		pindex = OFF_TO_IDX(off);
1516 retry_lookup:
1517 		/*
1518 		 * Calculate the amount to transfer. Not to exceed a page,
1519 		 * the EOF, or the passed in nbytes.
1520 		 */
1521 		xfsize = obj->un_pager.vnp.vnp_size - off;
1522 		if (xfsize > PAGE_SIZE)
1523 			xfsize = PAGE_SIZE;
1524 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1525 		if (PAGE_SIZE - pgoff < xfsize)
1526 			xfsize = PAGE_SIZE - pgoff;
1527 		if (uap->nbytes && xfsize > (uap->nbytes - sbytes))
1528 			xfsize = uap->nbytes - sbytes;
1529 		if (xfsize <= 0)
1530 			break;
1531 		/*
1532 		 * Optimize the non-blocking case by looking at the socket space
1533 		 * before going to the extra work of constituting the sf_buf.
1534 		 */
1535 		if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) {
1536 			if (so->so_state & SS_CANTSENDMORE)
1537 				error = EPIPE;
1538 			else
1539 				error = EAGAIN;
1540 			sbunlock(&so->so_snd);
1541 			goto done;
1542 		}
1543 		/*
1544 		 * Attempt to look up the page.
1545 		 *
1546 		 *	Allocate if not found
1547 		 *
1548 		 *	Wait and loop if busy.
1549 		 */
1550 		pg = vm_page_lookup(obj, pindex);
1551 
1552 		if (pg == NULL) {
1553 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
1554 			if (pg == NULL) {
1555 				VM_WAIT;
1556 				goto retry_lookup;
1557 			}
1558 			vm_page_wakeup(pg);
1559 		} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
1560 			goto retry_lookup;
1561 		}
1562 
1563 		/*
1564 		 * Wire the page so it does not get ripped out from under
1565 		 * us.
1566 		 */
1567 
1568 		vm_page_wire(pg);
1569 
1570 		/*
1571 		 * If page is not valid for what we need, initiate I/O
1572 		 */
1573 
1574 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1575 			struct uio auio;
1576 			struct iovec aiov;
1577 			int bsize;
1578 
1579 			/*
1580 			 * Ensure that our page is still around when the I/O
1581 			 * completes.
1582 			 */
1583 			vm_page_io_start(pg);
1584 
1585 			/*
1586 			 * Get the page from backing store.
1587 			 */
1588 			bsize = vp->v_mount->mnt_stat.f_iosize;
1589 			auio.uio_iov = &aiov;
1590 			auio.uio_iovcnt = 1;
1591 			aiov.iov_base = 0;
1592 			aiov.iov_len = MAXBSIZE;
1593 			auio.uio_resid = MAXBSIZE;
1594 			auio.uio_offset = trunc_page(off);
1595 			auio.uio_segflg = UIO_NOCOPY;
1596 			auio.uio_rw = UIO_READ;
1597 			auio.uio_td = td;
1598 			vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
1599 			error = VOP_READ(vp, &auio,
1600 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1601 				    p->p_ucred);
1602 			VOP_UNLOCK(vp, 0, td);
1603 			vm_page_flag_clear(pg, PG_ZERO);
1604 			vm_page_io_finish(pg);
1605 			if (error) {
1606 				vm_page_unwire(pg, 0);
1607 				/*
1608 				 * See if anyone else might know about this page.
1609 				 * If not and it is not valid, then free it.
1610 				 */
1611 				if (pg->wire_count == 0 && pg->valid == 0 &&
1612 				    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
1613 				    pg->hold_count == 0) {
1614 					vm_page_busy(pg);
1615 					vm_page_free(pg);
1616 				}
1617 				sbunlock(&so->so_snd);
1618 				goto done;
1619 			}
1620 		}
1621 
1622 
1623 		/*
1624 		 * Get a sendfile buf. We usually wait as long as necessary,
1625 		 * but this wait can be interrupted.
1626 		 */
1627 		if ((sf = sf_buf_alloc()) == NULL) {
1628 			s = splvm();
1629 			vm_page_unwire(pg, 0);
1630 			if (pg->wire_count == 0 && pg->object == NULL)
1631 				vm_page_free(pg);
1632 			splx(s);
1633 			sbunlock(&so->so_snd);
1634 			error = EINTR;
1635 			goto done;
1636 		}
1637 
1638 
1639 		/*
1640 		 * Allocate a kernel virtual page and insert the physical page
1641 		 * into it.
1642 		 */
1643 
1644 		sf->m = pg;
1645 		pmap_qenter(sf->kva, &pg, 1);
1646 		/*
1647 		 * Get an mbuf header and set it up as having external storage.
1648 		 */
1649 		MGETHDR(m, M_WAIT, MT_DATA);
1650 		if (m == NULL) {
1651 			error = ENOBUFS;
1652 			sf_buf_free((void *)sf->kva, PAGE_SIZE);
1653 			sbunlock(&so->so_snd);
1654 			goto done;
1655 		}
1656 		m->m_ext.ext_free = sf_buf_free;
1657 		m->m_ext.ext_ref = sf_buf_ref;
1658 		m->m_ext.ext_buf = (void *)sf->kva;
1659 		m->m_ext.ext_size = PAGE_SIZE;
1660 		m->m_data = (char *) sf->kva + pgoff;
1661 		m->m_flags |= M_EXT;
1662 		m->m_pkthdr.len = m->m_len = xfsize;
1663 		/*
1664 		 * Add the buffer to the socket buffer chain.
1665 		 */
1666 		s = splnet();
1667 retry_space:
1668 		/*
1669 		 * Make sure that the socket is still able to take more data.
1670 		 * CANTSENDMORE being true usually means that the connection
1671 		 * was closed. so_error is true when an error was sensed after
1672 		 * a previous send.
1673 		 * The state is checked after the page mapping and buffer
1674 		 * allocation above since those operations may block and make
1675 		 * any socket checks stale. From this point forward, nothing
1676 		 * blocks before the pru_send (or more accurately, any blocking
1677 		 * results in a loop back to here to re-check).
1678 		 */
1679 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1680 			if (so->so_state & SS_CANTSENDMORE) {
1681 				error = EPIPE;
1682 			} else {
1683 				error = so->so_error;
1684 				so->so_error = 0;
1685 			}
1686 			m_freem(m);
1687 			sbunlock(&so->so_snd);
1688 			splx(s);
1689 			goto done;
1690 		}
1691 		/*
1692 		 * Wait for socket space to become available. We do this just
1693 		 * after checking the connection state above in order to avoid
1694 		 * a race condition with sbwait().
1695 		 */
1696 		if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) {
1697 			if (so->so_state & SS_NBIO) {
1698 				m_freem(m);
1699 				sbunlock(&so->so_snd);
1700 				splx(s);
1701 				error = EAGAIN;
1702 				goto done;
1703 			}
1704 			error = sbwait(&so->so_snd);
1705 			/*
1706 			 * An error from sbwait usually indicates that we've
1707 			 * been interrupted by a signal. If we've sent anything
1708 			 * then return bytes sent, otherwise return the error.
1709 			 */
1710 			if (error) {
1711 				m_freem(m);
1712 				sbunlock(&so->so_snd);
1713 				splx(s);
1714 				goto done;
1715 			}
1716 			goto retry_space;
1717 		}
1718 		error =
1719 		    (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, td);
1720 		splx(s);
1721 		if (error) {
1722 			sbunlock(&so->so_snd);
1723 			goto done;
1724 		}
1725 	}
1726 	sbunlock(&so->so_snd);
1727 
1728 	/*
1729 	 * Send trailers. Wimp out and use writev(2).
1730 	 */
1731 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1732 			nuap.fd = uap->s;
1733 			nuap.iovp = hdtr.trailers;
1734 			nuap.iovcnt = hdtr.trl_cnt;
1735 			error = writev(&nuap);
1736 			if (error)
1737 				goto done;
1738 			if (compat)
1739 				sbytes += nuap.sysmsg_result;
1740 			else
1741 				hdtr_size += nuap.sysmsg_result;
1742 	}
1743 
1744 done:
1745 	if (uap->sbytes != NULL) {
1746 		if (compat == 0)
1747 			sbytes += hdtr_size;
1748 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1749 	}
1750 	if (vp)
1751 		vrele(vp);
1752 	if (fp)
1753 		fdrop(fp, td);
1754 	return (error);
1755 }
1756