xref: /dragonfly/sys/kern/uipc_syscalls.c (revision cecb9aae)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
37  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38  */
39 
40 #include "opt_ktrace.h"
41 #include "opt_sctp.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/sysproto.h>
47 #include <sys/malloc.h>
48 #include <sys/filedesc.h>
49 #include <sys/event.h>
50 #include <sys/proc.h>
51 #include <sys/fcntl.h>
52 #include <sys/file.h>
53 #include <sys/filio.h>
54 #include <sys/kern_syscall.h>
55 #include <sys/mbuf.h>
56 #include <sys/protosw.h>
57 #include <sys/sfbuf.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/socketops.h>
61 #include <sys/uio.h>
62 #include <sys/vnode.h>
63 #include <sys/lock.h>
64 #include <sys/mount.h>
65 #ifdef KTRACE
66 #include <sys/ktrace.h>
67 #endif
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_extern.h>
74 #include <sys/file2.h>
75 #include <sys/signalvar.h>
76 #include <sys/serialize.h>
77 
78 #include <sys/thread2.h>
79 #include <sys/msgport2.h>
80 #include <sys/socketvar2.h>
81 #include <net/netmsg2.h>
82 
83 #ifdef SCTP
84 #include <netinet/sctp_peeloff.h>
85 #endif /* SCTP */
86 
87 extern int use_soaccept_pred_fast;
88 extern int use_sendfile_async;
89 
90 /*
91  * System call interface to the socket abstraction.
92  */
93 
94 extern	struct fileops socketops;
95 
96 /*
97  * socket_args(int domain, int type, int protocol)
98  */
99 int
100 kern_socket(int domain, int type, int protocol, int *res)
101 {
102 	struct thread *td = curthread;
103 	struct filedesc *fdp = td->td_proc->p_fd;
104 	struct socket *so;
105 	struct file *fp;
106 	int fd, error;
107 
108 	KKASSERT(td->td_lwp);
109 
110 	error = falloc(td->td_lwp, &fp, &fd);
111 	if (error)
112 		return (error);
113 	error = socreate(domain, &so, type, protocol, td);
114 	if (error) {
115 		fsetfd(fdp, NULL, fd);
116 	} else {
117 		fp->f_type = DTYPE_SOCKET;
118 		fp->f_flag = FREAD | FWRITE;
119 		fp->f_ops = &socketops;
120 		fp->f_data = so;
121 		*res = fd;
122 		fsetfd(fdp, fp, fd);
123 	}
124 	fdrop(fp);
125 	return (error);
126 }
127 
128 /*
129  * MPALMOSTSAFE
130  */
131 int
132 sys_socket(struct socket_args *uap)
133 {
134 	int error;
135 
136 	error = kern_socket(uap->domain, uap->type, uap->protocol,
137 			    &uap->sysmsg_iresult);
138 
139 	return (error);
140 }
141 
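/*
 * kern_bind() is the in-kernel backend for bind(2): resolve descriptor
 * 's' to its socket and bind it to the address 'sa'.  The caller keeps
 * ownership of 'sa' and must free it afterwards.
 */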
142 int
143 kern_bind(int s, struct sockaddr *sa)
144 {
145 	struct thread *td = curthread;
146 	struct proc *p = td->td_proc;
147 	struct file *fp;
148 	int error;
149 
150 	KKASSERT(p);
151 	error = holdsock(p->p_fd, s, &fp);
152 	if (error)
153 		return (error);
154 	error = sobind((struct socket *)fp->f_data, sa, td);
155 	fdrop(fp);
156 	return (error);
157 }
158 
159 /*
160  * bind_args(int s, caddr_t name, int namelen)
161  *
162  * MPALMOSTSAFE
163  */
164 int
165 sys_bind(struct bind_args *uap)
166 {
167 	struct sockaddr *sa;
168 	int error;
169 
170 	error = getsockaddr(&sa, uap->name, uap->namelen);
171 	if (error)
172 		return (error);
173 	error = kern_bind(uap->s, sa);
174 	kfree(sa, M_SONAME);
175 
176 	return (error);
177 }
178 
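/*
 * kern_listen() is the in-kernel backend for listen(2): resolve
 * descriptor 's' to its socket and mark it as accepting connections
 * with the given backlog.
 */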
179 int
180 kern_listen(int s, int backlog)
181 {
182 	struct thread *td = curthread;
183 	struct proc *p = td->td_proc;
184 	struct file *fp;
185 	int error;
186 
187 	KKASSERT(p);
188 	error = holdsock(p->p_fd, s, &fp);
189 	if (error)
190 		return (error);
191 	error = solisten((struct socket *)fp->f_data, backlog, td);
192 	fdrop(fp);
193 	return(error);
194 }
195 
196 /*
197  * listen_args(int s, int backlog)
198  *
199  * MPALMOSTSAFE
200  */
201 int
202 sys_listen(struct listen_args *uap)
203 {
204 	int error;
205 
206 	error = kern_listen(uap->s, uap->backlog);
207 	return (error);
208 }
209 
210 /*
211  * Returns the accepted socket as well.
212  *
213  * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs; the
214  *	  pool token is absolutely required to avoid a sofree() race,
215  *	  as well as to avoid tailq handling races.
216  */
217 static boolean_t
218 soaccept_predicate(struct netmsg_so_notify *msg)
219 {
220 	struct socket *head = msg->base.nm_so;
221 	struct socket *so;
222 
223 	if (head->so_error != 0) {
224 		msg->base.lmsg.ms_error = head->so_error;
225 		return (TRUE);
226 	}
227 	lwkt_getpooltoken(head);
228 	if (!TAILQ_EMPTY(&head->so_comp)) {
229 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
230 		so = TAILQ_FIRST(&head->so_comp);
231 		TAILQ_REMOVE(&head->so_comp, so, so_list);
232 		head->so_qlen--;
233 		soclrstate(so, SS_COMP);
234 		so->so_head = NULL;
235 		soreference(so);
236 
237 		lwkt_relpooltoken(head);
238 
239 		msg->base.lmsg.ms_error = 0;
240 		msg->base.nm_so = so;
241 		return (TRUE);
242 	}
243 	lwkt_relpooltoken(head);
244 	if (head->so_state & SS_CANTRCVMORE) {
245 		msg->base.lmsg.ms_error = ECONNABORTED;
246 		return (TRUE);
247 	}
248 	if (msg->nm_fflags & FNONBLOCK) {
249 		msg->base.lmsg.ms_error = EWOULDBLOCK;
250 		return (TRUE);
251 	}
252 
253 	return (FALSE);
254 }
255 
256 /*
257  * The third argument to kern_accept() is a handle to a struct sockaddr.
258  * This allows kern_accept() to return a pointer to an allocated struct
259  * sockaddr which must be freed later with kfree(..., M_SONAME).  The
260  * caller must initialize *name to NULL.
261  */
262 int
263 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
264 {
265 	struct thread *td = curthread;
266 	struct filedesc *fdp = td->td_proc->p_fd;
267 	struct file *lfp = NULL;
268 	struct file *nfp = NULL;
269 	struct sockaddr *sa;
270 	struct socket *head, *so;
271 	struct netmsg_so_notify msg;
272 	int fd;
273 	u_int fflag;		/* type must match fp->f_flag */
274 	int error, tmp;
275 
276 	*res = -1;
277 	if (name && namelen && *namelen < 0)
278 		return (EINVAL);
279 
280 	error = holdsock(td->td_proc->p_fd, s, &lfp);
281 	if (error)
282 		return (error);
283 
284 	error = falloc(td->td_lwp, &nfp, &fd);
285 	if (error) {		/* Probably ran out of file descriptors. */
286 		fdrop(lfp);
287 		return (error);
288 	}
289 	head = (struct socket *)lfp->f_data;
290 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
291 		error = EINVAL;
292 		goto done;
293 	}
294 
295 	if (fflags & O_FBLOCKING)
296 		fflags |= lfp->f_flag & ~FNONBLOCK;
297 	else if (fflags & O_FNONBLOCKING)
298 		fflags |= lfp->f_flag | FNONBLOCK;
299 	else
300 		fflags = lfp->f_flag;
301 
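	/*
	 * Fast path: evaluate the accept predicate directly.  If a
	 * completed connection is already queued (or an error or
	 * non-blocking condition applies) we avoid the netmsg round
	 * trip to the protocol thread below.
	 */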
302 	if (use_soaccept_pred_fast) {
303 		boolean_t pred;
304 
305 		/* Initialize necessary parts for soaccept_predicate() */
306 		netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
307 		msg.nm_fflags = fflags;
308 
309 		lwkt_getpooltoken(head);
310 		pred = soaccept_predicate(&msg);
311 		lwkt_relpooltoken(head);
312 
313 		if (pred) {
314 			error = msg.base.lmsg.ms_error;
315 			if (error)
316 				goto done;
317 			else
318 				goto accepted;
319 		}
320 	}
321 
322 	/* optimize for uniprocessor case later XXX JH */
323 	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
324 			      0, netmsg_so_notify, netmsg_so_notify_doabort);
325 	msg.nm_predicate = soaccept_predicate;
326 	msg.nm_fflags = fflags;
327 	msg.nm_etype = NM_REVENT;
328 	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
329 	if (error)
330 		goto done;
331 
332 accepted:
333 	/*
334 	 * At this point we have the connection that's ready to be accepted.
335 	 *
336 	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
337 	 * 	 to eat the ref and turn it into a descriptor.
338 	 */
339 	so = msg.base.nm_so;
340 
341 	fflag = lfp->f_flag;
342 
343 	/* connection has been removed from the listen queue */
344 	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
345 
346 	if (head->so_sigio != NULL)
347 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
348 
349 	nfp->f_type = DTYPE_SOCKET;
350 	nfp->f_flag = fflag;
351 	nfp->f_ops = &socketops;
352 	nfp->f_data = so;
353 	/* Sync socket nonblocking/async state with file flags */
354 	tmp = fflag & FNONBLOCK;
355 	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
356 	tmp = fflag & FASYNC;
357 	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
358 
359 	sa = NULL;
360 	if (so->so_faddr != NULL) {
361 		sa = so->so_faddr;
362 		so->so_faddr = NULL;
363 
364 		soaccept_generic(so);
365 		error = 0;
366 	} else {
367 		error = soaccept(so, &sa);
368 	}
369 
370 	/*
371 	 * Set the returned name and namelen as applicable.  Set the returned
372 	 * namelen to 0 for older code which might ignore the return value
373 	 * from accept.
374 	 */
375 	if (error == 0) {
376 		if (sa && name && namelen) {
377 			if (*namelen > sa->sa_len)
378 				*namelen = sa->sa_len;
379 			*name = sa;
380 		} else {
381 			if (sa)
382 				kfree(sa, M_SONAME);
383 		}
384 	}
385 
386 done:
387 	/*
388 	 * If an error occurred, clear the reserved descriptor, else associate
389 	 * nfp with it.
390 	 *
391 	 * Note that *res is normally ignored if an error is returned but
392 	 * a syscall message will still have access to the result code.
393 	 */
394 	if (error) {
395 		fsetfd(fdp, NULL, fd);
396 	} else {
397 		*res = fd;
398 		fsetfd(fdp, nfp, fd);
399 	}
400 	fdrop(nfp);
401 	fdrop(lfp);
402 	return (error);
403 }
404 
405 /*
406  * accept(int s, caddr_t name, int *anamelen)
407  *
408  * MPALMOSTSAFE
409  */
410 int
411 sys_accept(struct accept_args *uap)
412 {
413 	struct sockaddr *sa = NULL;
414 	int sa_len;
415 	int error;
416 
417 	if (uap->name) {
418 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
419 		if (error)
420 			return (error);
421 
422 		error = kern_accept(uap->s, 0, &sa, &sa_len,
423 				    &uap->sysmsg_iresult);
424 
425 		if (error == 0)
426 			error = copyout(sa, uap->name, sa_len);
427 		if (error == 0) {
428 			error = copyout(&sa_len, uap->anamelen,
429 			    sizeof(*uap->anamelen));
430 		}
431 		if (sa)
432 			kfree(sa, M_SONAME);
433 	} else {
434 		error = kern_accept(uap->s, 0, NULL, 0,
435 				    &uap->sysmsg_iresult);
436 	}
437 	return (error);
438 }
439 
440 /*
441  * extaccept(int s, int fflags, caddr_t name, int *anamelen)
442  *
443  * MPALMOSTSAFE
444  */
445 int
446 sys_extaccept(struct extaccept_args *uap)
447 {
448 	struct sockaddr *sa = NULL;
449 	int sa_len;
450 	int error;
451 	int fflags = uap->flags & O_FMASK;
452 
453 	if (uap->name) {
454 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
455 		if (error)
456 			return (error);
457 
458 		error = kern_accept(uap->s, fflags, &sa, &sa_len,
459 				    &uap->sysmsg_iresult);
460 
461 		if (error == 0)
462 			error = copyout(sa, uap->name, sa_len);
463 		if (error == 0) {
464 			error = copyout(&sa_len, uap->anamelen,
465 			    sizeof(*uap->anamelen));
466 		}
467 		if (sa)
468 			kfree(sa, M_SONAME);
469 	} else {
470 		error = kern_accept(uap->s, fflags, NULL, 0,
471 				    &uap->sysmsg_iresult);
472 	}
473 	return (error);
474 }
475 
476 
477 /*
478  * Returns TRUE if predicate satisfied.
479  */
480 static boolean_t
481 soconnected_predicate(struct netmsg_so_notify *msg)
482 {
483 	struct socket *so = msg->base.nm_so;
484 
485 	/* check predicate */
486 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
487 		msg->base.lmsg.ms_error = so->so_error;
488 		return (TRUE);
489 	}
490 
491 	return (FALSE);
492 }
493 
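/*
 * kern_connect() is the common backend for connect(2) and
 * extconnect(2): initiate a connection on the socket for descriptor
 * 's'.  Unless non-blocking operation is in effect, wait for the
 * connection to complete (or fail) via a so_notify message using
 * soconnected_predicate().
 */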
494 int
495 kern_connect(int s, int fflags, struct sockaddr *sa)
496 {
497 	struct thread *td = curthread;
498 	struct proc *p = td->td_proc;
499 	struct file *fp;
500 	struct socket *so;
501 	int error, interrupted = 0;
502 
503 	error = holdsock(p->p_fd, s, &fp);
504 	if (error)
505 		return (error);
506 	so = (struct socket *)fp->f_data;
507 
508 	if (fflags & O_FBLOCKING)
509 		/* fflags &= ~FNONBLOCK; */;
510 	else if (fflags & O_FNONBLOCKING)
511 		fflags |= FNONBLOCK;
512 	else
513 		fflags = fp->f_flag;
514 
515 	if (so->so_state & SS_ISCONNECTING) {
516 		error = EALREADY;
517 		goto done;
518 	}
519 	error = soconnect(so, sa, td);
520 	if (error)
521 		goto bad;
522 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
523 		error = EINPROGRESS;
524 		goto done;
525 	}
526 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
527 		struct netmsg_so_notify msg;
528 
529 		netmsg_init_abortable(&msg.base, so,
530 				      &curthread->td_msgport,
531 				      0,
532 				      netmsg_so_notify,
533 				      netmsg_so_notify_doabort);
534 		msg.nm_predicate = soconnected_predicate;
535 		msg.nm_etype = NM_REVENT;
536 		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
537 		if (error == EINTR || error == ERESTART)
538 			interrupted = 1;
539 	}
540 	if (error == 0) {
541 		error = so->so_error;
542 		so->so_error = 0;
543 	}
544 bad:
545 	if (!interrupted)
546 		soclrstate(so, SS_ISCONNECTING);
547 	if (error == ERESTART)
548 		error = EINTR;
549 done:
550 	fdrop(fp);
551 	return (error);
552 }
553 
554 /*
555  * connect_args(int s, caddr_t name, int namelen)
556  *
557  * MPALMOSTSAFE
558  */
559 int
560 sys_connect(struct connect_args *uap)
561 {
562 	struct sockaddr *sa;
563 	int error;
564 
565 	error = getsockaddr(&sa, uap->name, uap->namelen);
566 	if (error)
567 		return (error);
568 	error = kern_connect(uap->s, 0, sa);
569 	kfree(sa, M_SONAME);
570 
571 	return (error);
572 }
573 
574 /*
575  * extconnect_args(int s, int fflags, caddr_t name, int namelen)
576  *
577  * MPALMOSTSAFE
578  */
579 int
580 sys_extconnect(struct extconnect_args *uap)
581 {
582 	struct sockaddr *sa;
583 	int error;
584 	int fflags = uap->flags & O_FMASK;
585 
586 	error = getsockaddr(&sa, uap->name, uap->namelen);
587 	if (error)
588 		return (error);
589 	error = kern_connect(uap->s, fflags, sa);
590 	kfree(sa, M_SONAME);
591 
592 	return (error);
593 }
594 
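/*
 * kern_socketpair() is the backend for socketpair(2): create two
 * sockets, connect them to each other, and install them as
 * descriptors sv[0] and sv[1].  Datagram sockets are connected in
 * both directions since their connection state is asymmetric.
 */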
595 int
596 kern_socketpair(int domain, int type, int protocol, int *sv)
597 {
598 	struct thread *td = curthread;
599 	struct filedesc *fdp;
600 	struct file *fp1, *fp2;
601 	struct socket *so1, *so2;
602 	int fd1, fd2, error;
603 
604 	fdp = td->td_proc->p_fd;
605 	error = socreate(domain, &so1, type, protocol, td);
606 	if (error)
607 		return (error);
608 	error = socreate(domain, &so2, type, protocol, td);
609 	if (error)
610 		goto free1;
611 	error = falloc(td->td_lwp, &fp1, &fd1);
612 	if (error)
613 		goto free2;
614 	sv[0] = fd1;
615 	fp1->f_data = so1;
616 	error = falloc(td->td_lwp, &fp2, &fd2);
617 	if (error)
618 		goto free3;
619 	fp2->f_data = so2;
620 	sv[1] = fd2;
621 	error = soconnect2(so1, so2);
622 	if (error)
623 		goto free4;
624 	if (type == SOCK_DGRAM) {
625 		/*
626 		 * Datagram socket connection is asymmetric.
627 		 */
628 		 error = soconnect2(so2, so1);
629 		 if (error)
630 			goto free4;
631 	}
632 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
633 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
634 	fp1->f_ops = fp2->f_ops = &socketops;
635 	fsetfd(fdp, fp1, fd1);
636 	fsetfd(fdp, fp2, fd2);
637 	fdrop(fp1);
638 	fdrop(fp2);
639 	return (error);
640 free4:
641 	fsetfd(fdp, NULL, fd2);
642 	fdrop(fp2);
643 free3:
644 	fsetfd(fdp, NULL, fd1);
645 	fdrop(fp1);
646 free2:
647 	(void)soclose(so2, 0);
648 free1:
649 	(void)soclose(so1, 0);
650 	return (error);
651 }
652 
653 /*
654  * socketpair(int domain, int type, int protocol, int *rsv)
655  */
656 int
657 sys_socketpair(struct socketpair_args *uap)
658 {
659 	int error, sockv[2];
660 
661 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
662 
663 	if (error == 0) {
664 		error = copyout(sockv, uap->rsv, sizeof(sockv));
665 
666 		if (error != 0) {
667 			kern_close(sockv[0]);
668 			kern_close(sockv[1]);
669 		}
670 	}
671 
672 	return (error);
673 }
674 
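/*
 * kern_sendmsg() is the common backend for sendto(2) and sendmsg(2):
 * send the data described by 'auio' on the socket for descriptor 's',
 * optionally with a destination address 'sa' and a control mbuf.  On
 * success *res is set to the number of bytes sent.
 */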
675 int
676 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
677 	     struct mbuf *control, int flags, size_t *res)
678 {
679 	struct thread *td = curthread;
680 	struct lwp *lp = td->td_lwp;
681 	struct proc *p = td->td_proc;
682 	struct file *fp;
683 	size_t len;
684 	int error;
685 	struct socket *so;
686 #ifdef KTRACE
687 	struct iovec *ktriov = NULL;
688 	struct uio ktruio;
689 #endif
690 
691 	error = holdsock(p->p_fd, s, &fp);
692 	if (error)
693 		return (error);
694 #ifdef KTRACE
695 	if (KTRPOINT(td, KTR_GENIO)) {
696 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
697 
698 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
699 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
700 		ktruio = *auio;
701 	}
702 #endif
703 	len = auio->uio_resid;
704 	so = (struct socket *)fp->f_data;
705 	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
706 		if (fp->f_flag & FNONBLOCK)
707 			flags |= MSG_FNONBLOCKING;
708 	}
709 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
710 	if (error) {
711 		if (auio->uio_resid != len && (error == ERESTART ||
712 		    error == EINTR || error == EWOULDBLOCK))
713 			error = 0;
714 		if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
715 		    !(so->so_options & SO_NOSIGPIPE))
716 			lwpsignal(p, lp, SIGPIPE);
717 	}
718 #ifdef KTRACE
719 	if (ktriov != NULL) {
720 		if (error == 0) {
721 			ktruio.uio_iov = ktriov;
722 			ktruio.uio_resid = len - auio->uio_resid;
723 			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
724 		}
725 		kfree(ktriov, M_TEMP);
726 	}
727 #endif
728 	if (error == 0)
729 		*res  = len - auio->uio_resid;
730 	fdrop(fp);
731 	return (error);
732 }
733 
734 /*
735  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
736  *
737  * MPALMOSTSAFE
738  */
739 int
740 sys_sendto(struct sendto_args *uap)
741 {
742 	struct thread *td = curthread;
743 	struct uio auio;
744 	struct iovec aiov;
745 	struct sockaddr *sa = NULL;
746 	int error;
747 
748 	if (uap->to) {
749 		error = getsockaddr(&sa, uap->to, uap->tolen);
750 		if (error)
751 			return (error);
752 	}
753 	aiov.iov_base = uap->buf;
754 	aiov.iov_len = uap->len;
755 	auio.uio_iov = &aiov;
756 	auio.uio_iovcnt = 1;
757 	auio.uio_offset = 0;
758 	auio.uio_resid = uap->len;
759 	auio.uio_segflg = UIO_USERSPACE;
760 	auio.uio_rw = UIO_WRITE;
761 	auio.uio_td = td;
762 
763 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
764 			     &uap->sysmsg_szresult);
765 
766 	if (sa)
767 		kfree(sa, M_SONAME);
768 	return (error);
769 }
770 
771 /*
772  * sendmsg_args(int s, caddr_t msg, int flags)
773  *
774  * MPALMOSTSAFE
775  */
776 int
777 sys_sendmsg(struct sendmsg_args *uap)
778 {
779 	struct thread *td = curthread;
780 	struct msghdr msg;
781 	struct uio auio;
782 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
783 	struct sockaddr *sa = NULL;
784 	struct mbuf *control = NULL;
785 	int error;
786 
787 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
788 	if (error)
789 		return (error);
790 
791 	/*
792 	 * Conditionally copyin msg.msg_name.
793 	 */
794 	if (msg.msg_name) {
795 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
796 		if (error)
797 			return (error);
798 	}
799 
800 	/*
801 	 * Populate auio.
802 	 */
803 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
804 			     &auio.uio_resid);
805 	if (error)
806 		goto cleanup2;
807 	auio.uio_iov = iov;
808 	auio.uio_iovcnt = msg.msg_iovlen;
809 	auio.uio_offset = 0;
810 	auio.uio_segflg = UIO_USERSPACE;
811 	auio.uio_rw = UIO_WRITE;
812 	auio.uio_td = td;
813 
814 	/*
815 	 * Conditionally copyin msg.msg_control.
816 	 */
817 	if (msg.msg_control) {
818 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
819 		    msg.msg_controllen > MLEN) {
820 			error = EINVAL;
821 			goto cleanup;
822 		}
823 		control = m_get(MB_WAIT, MT_CONTROL);
824 		if (control == NULL) {
825 			error = ENOBUFS;
826 			goto cleanup;
827 		}
828 		control->m_len = msg.msg_controllen;
829 		error = copyin(msg.msg_control, mtod(control, caddr_t),
830 			       msg.msg_controllen);
831 		if (error) {
832 			m_free(control);
833 			goto cleanup;
834 		}
835 	}
836 
837 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
838 			     &uap->sysmsg_szresult);
839 
840 cleanup:
841 	iovec_free(&iov, aiov);
842 cleanup2:
843 	if (sa)
844 		kfree(sa, M_SONAME);
845 	return (error);
846 }
847 
848 /*
849  * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
850  * null, it returns a dynamically allocated struct sockaddr and an mbuf.
851  * Don't forget to kfree(..., M_SONAME) and m_freem() these if they are returned.
852  */
853 int
854 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
855 	     struct mbuf **control, int *flags, size_t *res)
856 {
857 	struct thread *td = curthread;
858 	struct proc *p = td->td_proc;
859 	struct file *fp;
860 	size_t len;
861 	int error;
862 	int lflags;
863 	struct socket *so;
864 #ifdef KTRACE
865 	struct iovec *ktriov = NULL;
866 	struct uio ktruio;
867 #endif
868 
869 	error = holdsock(p->p_fd, s, &fp);
870 	if (error)
871 		return (error);
872 #ifdef KTRACE
873 	if (KTRPOINT(td, KTR_GENIO)) {
874 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
875 
876 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
877 		bcopy(auio->uio_iov, ktriov, iovlen);
878 		ktruio = *auio;
879 	}
880 #endif
881 	len = auio->uio_resid;
882 	so = (struct socket *)fp->f_data;
883 
884 	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
885 		if (fp->f_flag & FNONBLOCK) {
886 			if (flags) {
887 				*flags |= MSG_FNONBLOCKING;
888 			} else {
889 				lflags = MSG_FNONBLOCKING;
890 				flags = &lflags;
891 			}
892 		}
893 	}
894 
895 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
896 	if (error) {
897 		if (auio->uio_resid != len && (error == ERESTART ||
898 		    error == EINTR || error == EWOULDBLOCK))
899 			error = 0;
900 	}
901 #ifdef KTRACE
902 	if (ktriov != NULL) {
903 		if (error == 0) {
904 			ktruio.uio_iov = ktriov;
905 			ktruio.uio_resid = len - auio->uio_resid;
906 			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
907 		}
908 		kfree(ktriov, M_TEMP);
909 	}
910 #endif
911 	if (error == 0)
912 		*res = len - auio->uio_resid;
913 	fdrop(fp);
914 	return (error);
915 }
916 
917 /*
918  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
919  *			caddr_t from, int *fromlenaddr)
920  *
921  * MPALMOSTSAFE
922  */
923 int
924 sys_recvfrom(struct recvfrom_args *uap)
925 {
926 	struct thread *td = curthread;
927 	struct uio auio;
928 	struct iovec aiov;
929 	struct sockaddr *sa = NULL;
930 	int error, fromlen;
931 
932 	if (uap->from && uap->fromlenaddr) {
933 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
934 		if (error)
935 			return (error);
936 		if (fromlen < 0)
937 			return (EINVAL);
938 	} else {
939 		fromlen = 0;
940 	}
941 	aiov.iov_base = uap->buf;
942 	aiov.iov_len = uap->len;
943 	auio.uio_iov = &aiov;
944 	auio.uio_iovcnt = 1;
945 	auio.uio_offset = 0;
946 	auio.uio_resid = uap->len;
947 	auio.uio_segflg = UIO_USERSPACE;
948 	auio.uio_rw = UIO_READ;
949 	auio.uio_td = td;
950 
951 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
952 			     &uap->flags, &uap->sysmsg_szresult);
953 
954 	if (error == 0 && uap->from) {
955 		/* note: sa may still be NULL */
956 		if (sa) {
957 			fromlen = MIN(fromlen, sa->sa_len);
958 			error = copyout(sa, uap->from, fromlen);
959 		} else {
960 			fromlen = 0;
961 		}
962 		if (error == 0) {
963 			error = copyout(&fromlen, uap->fromlenaddr,
964 					sizeof(fromlen));
965 		}
966 	}
967 	if (sa)
968 		kfree(sa, M_SONAME);
969 
970 	return (error);
971 }
972 
973 /*
974  * recvmsg_args(int s, struct msghdr *msg, int flags)
975  *
976  * MPALMOSTSAFE
977  */
978 int
979 sys_recvmsg(struct recvmsg_args *uap)
980 {
981 	struct thread *td = curthread;
982 	struct msghdr msg;
983 	struct uio auio;
984 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
985 	struct mbuf *m, *control = NULL;
986 	struct sockaddr *sa = NULL;
987 	caddr_t ctlbuf;
988 	socklen_t *ufromlenp, *ucontrollenp;
989 	int error, fromlen, controllen, len, flags, *uflagsp;
990 
991 	/*
992 	 * This copyin handles everything except the iovec.
993 	 */
994 	error = copyin(uap->msg, &msg, sizeof(msg));
995 	if (error)
996 		return (error);
997 
998 	if (msg.msg_name && msg.msg_namelen < 0)
999 		return (EINVAL);
1000 	if (msg.msg_control && msg.msg_controllen < 0)
1001 		return (EINVAL);
1002 
1003 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1004 		    msg_namelen));
1005 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1006 		       msg_controllen));
1007 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1008 							msg_flags));
1009 
1010 	/*
1011 	 * Populate auio.
1012 	 */
1013 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1014 			     &auio.uio_resid);
1015 	if (error)
1016 		return (error);
1017 	auio.uio_iov = iov;
1018 	auio.uio_iovcnt = msg.msg_iovlen;
1019 	auio.uio_offset = 0;
1020 	auio.uio_segflg = UIO_USERSPACE;
1021 	auio.uio_rw = UIO_READ;
1022 	auio.uio_td = td;
1023 
1024 	flags = uap->flags;
1025 
1026 	error = kern_recvmsg(uap->s,
1027 			     (msg.msg_name ? &sa : NULL), &auio,
1028 			     (msg.msg_control ? &control : NULL), &flags,
1029 			     &uap->sysmsg_szresult);
1030 
1031 	/*
1032 	 * Conditionally copyout the name and populate the namelen field.
1033 	 */
1034 	if (error == 0 && msg.msg_name) {
1035 		/* note: sa may still be NULL */
1036 		if (sa != NULL) {
1037 			fromlen = MIN(msg.msg_namelen, sa->sa_len);
1038 			error = copyout(sa, msg.msg_name, fromlen);
1039 		} else {
1040 			fromlen = 0;
1041 		}
1042 		if (error == 0)
1043 			error = copyout(&fromlen, ufromlenp,
1044 			    sizeof(*ufromlenp));
1045 	}
1046 
1047 	/*
1048 	 * Copyout msg.msg_control and msg.msg_controllen.
1049 	 */
1050 	if (error == 0 && msg.msg_control) {
1051 		len = msg.msg_controllen;
1052 		m = control;
1053 		ctlbuf = (caddr_t)msg.msg_control;
1054 
1055 		while(m && len > 0) {
1056 			unsigned int tocopy;
1057 
1058 			if (len >= m->m_len) {
1059 				tocopy = m->m_len;
1060 			} else {
1061 				msg.msg_flags |= MSG_CTRUNC;
1062 				tocopy = len;
1063 			}
1064 
1065 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1066 			if (error)
1067 				goto cleanup;
1068 
1069 			ctlbuf += tocopy;
1070 			len -= tocopy;
1071 			m = m->m_next;
1072 		}
1073 		controllen = ctlbuf - (caddr_t)msg.msg_control;
1074 		error = copyout(&controllen, ucontrollenp,
1075 		    sizeof(*ucontrollenp));
1076 	}
1077 
1078 	if (error == 0)
1079 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1080 
1081 cleanup:
1082 	if (sa)
1083 		kfree(sa, M_SONAME);
1084 	iovec_free(&iov, aiov);
1085 	if (control)
1086 		m_freem(control);
1087 	return (error);
1088 }
1089 
1090 /*
1091  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1092  * in-kernel pointer instead of a userland pointer.  This allows us
1093  * to manipulate socket options in the emulation code.
1094  */
1095 int
1096 kern_setsockopt(int s, struct sockopt *sopt)
1097 {
1098 	struct thread *td = curthread;
1099 	struct proc *p = td->td_proc;
1100 	struct file *fp;
1101 	int error;
1102 
1103 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1104 		return (EFAULT);
1105 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1106 		return (EINVAL);
1107 	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
1108 		return (EINVAL);
1109 
1110 	error = holdsock(p->p_fd, s, &fp);
1111 	if (error)
1112 		return (error);
1113 
1114 	error = sosetopt((struct socket *)fp->f_data, sopt);
1115 	fdrop(fp);
1116 	return (error);
1117 }
1118 
1119 /*
1120  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1121  *
1122  * MPALMOSTSAFE
1123  */
1124 int
1125 sys_setsockopt(struct setsockopt_args *uap)
1126 {
1127 	struct thread *td = curthread;
1128 	struct sockopt sopt;
1129 	int error;
1130 
1131 	sopt.sopt_level = uap->level;
1132 	sopt.sopt_name = uap->name;
1133 	sopt.sopt_valsize = uap->valsize;
1134 	sopt.sopt_td = td;
1135 	sopt.sopt_val = NULL;
1136 
1137 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1138 		return (EINVAL);
1139 	if (uap->val) {
1140 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1141 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1142 		if (error)
1143 			goto out;
1144 	}
1145 
1146 	error = kern_setsockopt(uap->s, &sopt);
1147 out:
1148 	if (uap->val)
1149 		kfree(sopt.sopt_val, M_TEMP);
1150 	return(error);
1151 }
1152 
1153 /*
1154  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1155  * in-kernel pointer instead of a userland pointer.  This allows us
1156  * to manipulate socket options in the emulation code.
1157  */
1158 int
1159 kern_getsockopt(int s, struct sockopt *sopt)
1160 {
1161 	struct thread *td = curthread;
1162 	struct proc *p = td->td_proc;
1163 	struct file *fp;
1164 	int error;
1165 
1166 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1167 		return (EFAULT);
1168 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1169 		return (EINVAL);
1170 	if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1171 		return (EINVAL);
1172 
1173 	error = holdsock(p->p_fd, s, &fp);
1174 	if (error)
1175 		return (error);
1176 
1177 	error = sogetopt((struct socket *)fp->f_data, sopt);
1178 	fdrop(fp);
1179 	return (error);
1180 }
1181 
1182 /*
1183  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1184  *
1185  * MPALMOSTSAFE
1186  */
1187 int
1188 sys_getsockopt(struct getsockopt_args *uap)
1189 {
1190 	struct thread *td = curthread;
1191 	struct	sockopt sopt;
1192 	int	error, valsize;
1193 
1194 	if (uap->val) {
1195 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1196 		if (error)
1197 			return (error);
1198 	} else {
1199 		valsize = 0;
1200 	}
1201 
1202 	sopt.sopt_level = uap->level;
1203 	sopt.sopt_name = uap->name;
1204 	sopt.sopt_valsize = valsize;
1205 	sopt.sopt_td = td;
1206 	sopt.sopt_val = NULL;
1207 
1208 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1209 		return (EINVAL);
1210 	if (uap->val) {
1211 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1212 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1213 		if (error)
1214 			goto out;
1215 	}
1216 
1217 	error = kern_getsockopt(uap->s, &sopt);
1218 	if (error)
1219 		goto out;
1220 	valsize = sopt.sopt_valsize;
1221 	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1222 	if (error)
1223 		goto out;
1224 	if (uap->val)
1225 		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1226 out:
1227 	if (uap->val)
1228 		kfree(sopt.sopt_val, M_TEMP);
1229 	return (error);
1230 }
1231 
1232 /*
1233  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1234  * This allows kern_getsockname() to return a pointer to an allocated struct
1235  * sockaddr which must be freed later with FREE().  The caller must
1236  * initialize *name to NULL.
1237  */
1238 int
1239 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1240 {
1241 	struct thread *td = curthread;
1242 	struct proc *p = td->td_proc;
1243 	struct file *fp;
1244 	struct socket *so;
1245 	struct sockaddr *sa = NULL;
1246 	int error;
1247 
1248 	error = holdsock(p->p_fd, s, &fp);
1249 	if (error)
1250 		return (error);
1251 	if (*namelen < 0) {
1252 		fdrop(fp);
1253 		return (EINVAL);
1254 	}
1255 	so = (struct socket *)fp->f_data;
1256 	error = so_pru_sockaddr(so, &sa);
1257 	if (error == 0) {
1258 		if (sa == NULL) {
1259 			*namelen = 0;
1260 		} else {
1261 			*namelen = MIN(*namelen, sa->sa_len);
1262 			*name = sa;
1263 		}
1264 	}
1265 
1266 	fdrop(fp);
1267 	return (error);
1268 }
1269 
1270 /*
1271  * getsockname_args(int fdes, caddr_t asa, int *alen)
1272  *
1273  * Get socket name.
1274  *
1275  * MPALMOSTSAFE
1276  */
1277 int
1278 sys_getsockname(struct getsockname_args *uap)
1279 {
1280 	struct sockaddr *sa = NULL;
1281 	int error, sa_len;
1282 
1283 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1284 	if (error)
1285 		return (error);
1286 
1287 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1288 
1289 	if (error == 0)
1290 		error = copyout(sa, uap->asa, sa_len);
1291 	if (error == 0)
1292 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1293 	if (sa)
1294 		kfree(sa, M_SONAME);
1295 	return (error);
1296 }
1297 
1298 /*
1299  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1300  * This allows kern_getpeername() to return a pointer to an allocated struct
1301  * sockaddr which must be freed later with FREE().  The caller must
1302  * initialize *name to NULL.
1303  */
1304 int
1305 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1306 {
1307 	struct thread *td = curthread;
1308 	struct proc *p = td->td_proc;
1309 	struct file *fp;
1310 	struct socket *so;
1311 	struct sockaddr *sa = NULL;
1312 	int error;
1313 
1314 	error = holdsock(p->p_fd, s, &fp);
1315 	if (error)
1316 		return (error);
1317 	if (*namelen < 0) {
1318 		fdrop(fp);
1319 		return (EINVAL);
1320 	}
1321 	so = (struct socket *)fp->f_data;
1322 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1323 		fdrop(fp);
1324 		return (ENOTCONN);
1325 	}
1326 	error = so_pru_peeraddr(so, &sa);
1327 	if (error == 0) {
1328 		if (sa == NULL) {
1329 			*namelen = 0;
1330 		} else {
1331 			*namelen = MIN(*namelen, sa->sa_len);
1332 			*name = sa;
1333 		}
1334 	}
1335 
1336 	fdrop(fp);
1337 	return (error);
1338 }
1339 
1340 /*
1341  * getpeername_args(int fdes, caddr_t asa, int *alen)
1342  *
1343  * Get name of peer for connected socket.
1344  *
1345  * MPALMOSTSAFE
1346  */
1347 int
1348 sys_getpeername(struct getpeername_args *uap)
1349 {
1350 	struct sockaddr *sa = NULL;
1351 	int error, sa_len;
1352 
1353 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1354 	if (error)
1355 		return (error);
1356 
1357 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1358 
1359 	if (error == 0)
1360 		error = copyout(sa, uap->asa, sa_len);
1361 	if (error == 0)
1362 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1363 	if (sa)
1364 		kfree(sa, M_SONAME);
1365 	return (error);
1366 }
1367 
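/*
 * Copy a sockaddr of length 'len' in from userland into a kernel
 * M_SONAME allocation, sanity checking the length.  On success the
 * caller is responsible for kfree()ing *namp with M_SONAME.
 */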
1368 int
1369 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1370 {
1371 	struct sockaddr *sa;
1372 	int error;
1373 
1374 	*namp = NULL;
1375 	if (len > SOCK_MAXADDRLEN)
1376 		return ENAMETOOLONG;
1377 	if (len < offsetof(struct sockaddr, sa_data[0]))
1378 		return EDOM;
1379 	sa = kmalloc(len, M_SONAME, M_WAITOK);
1380 	error = copyin(uaddr, sa, len);
1381 	if (error) {
1382 		kfree(sa, M_SONAME);
1383 	} else {
1384 #if BYTE_ORDER != BIG_ENDIAN
1385 		/*
1386 		 * The bind(), connect(), and sendto() syscalls were not
1387 		 * versioned for COMPAT_43.  Thus, this check must stay.
1388 		 */
1389 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1390 			sa->sa_family = sa->sa_len;
1391 #endif
1392 		sa->sa_len = len;
1393 		*namp = sa;
1394 	}
1395 	return error;
1396 }
1397 
1398 /*
1399  * Detach a mapped page and release resources back to the system.
1400  * We must release our wiring and if the object is ripped out
1401  * from under the vm_page we become responsible for freeing the
1402  * page.
1403  *
1404  * MPSAFE
1405  */
1406 static void
1407 sf_buf_mfree(void *arg)
1408 {
1409 	struct sf_buf *sf = arg;
1410 	vm_page_t m;
1411 
1412 	m = sf_buf_page(sf);
1413 	if (sf_buf_free(sf)) {
1414 		/* sf invalid now */
1415 		vm_page_busy_wait(m, FALSE, "sockpgf");
1416 		vm_page_unwire(m, 0);
1417 		if (m->object == NULL &&
1418 		    m->wire_count == 0 &&
1419 		    (m->flags & PG_NEED_COMMIT) == 0) {
1420 			vm_page_free(m);
1421 		} else {
1422 			vm_page_wakeup(m);
1423 		}
1424 	}
1425 }
1426 
1427 /*
1428  * sendfile(2).
1429  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1430  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1431  *
1432  * Send a file specified by 'fd' and starting at 'offset' to a socket
1433  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1434  * nbytes == 0. Optionally add a header and/or trailer to the socket
1435  * output. If specified, write the total number of bytes sent into *sbytes.
1436  *
1437  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1438  * the headers to count against the remaining bytes to be sent from
1439  * the file descriptor.  We may wish to implement a compatibility syscall
1440  * in the future.
1441  *
1442  * MPALMOSTSAFE
1443  */
1444 int
1445 sys_sendfile(struct sendfile_args *uap)
1446 {
1447 	struct thread *td = curthread;
1448 	struct proc *p = td->td_proc;
1449 	struct file *fp;
1450 	struct vnode *vp = NULL;
1451 	struct sf_hdtr hdtr;
1452 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1453 	struct uio auio;
1454 	struct mbuf *mheader = NULL;
1455 	size_t hbytes = 0;
1456 	size_t tbytes;
1457 	off_t hdtr_size = 0;
1458 	off_t sbytes;
1459 	int error;
1460 
1461 	KKASSERT(p);
1462 
1463 	/*
1464 	 * Do argument checking. Must be a regular file in, stream
1465 	 * type and connected socket out, positive offset.
1466 	 */
1467 	fp = holdfp(p->p_fd, uap->fd, FREAD);
1468 	if (fp == NULL) {
1469 		return (EBADF);
1470 	}
1471 	if (fp->f_type != DTYPE_VNODE) {
1472 		fdrop(fp);
1473 		return (EINVAL);
1474 	}
1475 	vp = (struct vnode *)fp->f_data;
1476 	vref(vp);
1477 	fdrop(fp);
1478 
1479 	/*
1480 	 * If specified, get the pointer to the sf_hdtr struct for
1481 	 * any headers/trailers.
1482 	 */
1483 	if (uap->hdtr) {
1484 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1485 		if (error)
1486 			goto done;
1487 		/*
1488 		 * Send any headers.
1489 		 */
1490 		if (hdtr.headers) {
1491 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1492 					     hdtr.hdr_cnt, &hbytes);
1493 			if (error)
1494 				goto done;
1495 			auio.uio_iov = iov;
1496 			auio.uio_iovcnt = hdtr.hdr_cnt;
1497 			auio.uio_offset = 0;
1498 			auio.uio_segflg = UIO_USERSPACE;
1499 			auio.uio_rw = UIO_WRITE;
1500 			auio.uio_td = td;
1501 			auio.uio_resid = hbytes;
1502 
1503 			mheader = m_uiomove(&auio);
1504 
1505 			iovec_free(&iov, aiov);
1506 			if (mheader == NULL)
1507 				goto done;
1508 		}
1509 	}
1510 
1511 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1512 			      &sbytes, uap->flags);
1513 	if (error)
1514 		goto done;
1515 
1516 	/*
1517 	 * Send trailers. Wimp out and use writev(2).
1518 	 */
1519 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1520 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1521 				     hdtr.trl_cnt, &auio.uio_resid);
1522 		if (error)
1523 			goto done;
1524 		auio.uio_iov = iov;
1525 		auio.uio_iovcnt = hdtr.trl_cnt;
1526 		auio.uio_offset = 0;
1527 		auio.uio_segflg = UIO_USERSPACE;
1528 		auio.uio_rw = UIO_WRITE;
1529 		auio.uio_td = td;
1530 
1531 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1532 
1533 		iovec_free(&iov, aiov);
1534 		if (error)
1535 			goto done;
1536 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1537 	}
1538 
1539 done:
1540 	if (vp)
1541 		vrele(vp);
1542 	if (uap->sbytes != NULL) {
1543 		sbytes += hdtr_size;
1544 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1545 	}
1546 	return (error);
1547 }
1548 
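/*
 * kern_sendfile() is the backend for sendfile(2): map the file's VM
 * pages into sf_bufs and queue them on the socket as external-storage
 * mbufs so the data is transmitted without a copy through userland.
 * An optional 'mheader' mbuf chain is sent ahead of the file data and
 * *sbytes accumulates the number of bytes queued.
 */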
1549 int
1550 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1551 	      struct mbuf *mheader, off_t *sbytes, int flags)
1552 {
1553 	struct thread *td = curthread;
1554 	struct proc *p = td->td_proc;
1555 	struct vm_object *obj;
1556 	struct socket *so;
1557 	struct file *fp;
1558 	struct mbuf *m, *mp;
1559 	struct sf_buf *sf;
1560 	struct vm_page *pg;
1561 	off_t off, xfsize;
1562 	off_t hbytes = 0;
1563 	int error = 0;
1564 
1565 	if (vp->v_type != VREG) {
1566 		error = EINVAL;
1567 		goto done0;
1568 	}
1569 	if ((obj = vp->v_object) == NULL) {
1570 		error = EINVAL;
1571 		goto done0;
1572 	}
1573 	error = holdsock(p->p_fd, sfd, &fp);
1574 	if (error)
1575 		goto done0;
1576 	so = (struct socket *)fp->f_data;
1577 	if (so->so_type != SOCK_STREAM) {
1578 		error = EINVAL;
1579 		goto done;
1580 	}
1581 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1582 		error = ENOTCONN;
1583 		goto done;
1584 	}
1585 	if (offset < 0) {
1586 		error = EINVAL;
1587 		goto done;
1588 	}
1589 
1590 	*sbytes = 0;
1591 	/*
1592 	 * Protect against multiple writers to the socket.
1593 	 */
1594 	ssb_lock(&so->so_snd, M_WAITOK);
1595 
1596 	/*
1597 	 * Loop through the pages in the file, starting with the requested
1598 	 * offset. Get a file page (do I/O if necessary), map the file page
1599 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1600 	 * it on the socket.
1601 	 */
1602 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
1603 		vm_pindex_t pindex;
1604 		vm_offset_t pgoff;
1605 		int space;
1606 
1607 		pindex = OFF_TO_IDX(off);
1608 retry_lookup:
1609 		/*
1610 		 * Calculate the amount to transfer. Not to exceed a page,
1611 		 * the EOF, or the passed in nbytes.
1612 		 */
1613 		xfsize = vp->v_filesize - off;
1614 		if (xfsize > PAGE_SIZE)
1615 			xfsize = PAGE_SIZE;
1616 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1617 		if (PAGE_SIZE - pgoff < xfsize)
1618 			xfsize = PAGE_SIZE - pgoff;
1619 		if (nbytes && xfsize > (nbytes - *sbytes))
1620 			xfsize = nbytes - *sbytes;
1621 		if (xfsize <= 0)
1622 			break;
1623 		/*
1624 		 * Optimize the non-blocking case by looking at the socket space
1625 		 * before going to the extra work of constituting the sf_buf.
1626 		 */
1627 		if ((fp->f_flag & FNONBLOCK) &&
1628 		    ssb_space_prealloc(&so->so_snd) <= 0) {
1629 			if (so->so_state & SS_CANTSENDMORE)
1630 				error = EPIPE;
1631 			else
1632 				error = EAGAIN;
1633 			ssb_unlock(&so->so_snd);
1634 			goto done;
1635 		}
1636 		/*
1637 		 * Attempt to look up the page.
1638 		 *
1639 		 *	Allocate if not found, wait and loop if busy, then
1640 		 *	wire the page.  critical section protection is
1641 		 * 	required to maintain the object association (an
1642 		 *	interrupt can free the page) through to the
1643 		 *	vm_page_wire() call.
1644 		 */
1645 		vm_object_hold(obj);
1646 		pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
1647 		if (error) {
1648 			vm_page_sleep_busy(pg, TRUE, "sfpbsy");
1649 			vm_object_drop(obj);
1650 			goto retry_lookup;
1651 		}
1652 		if (pg == NULL) {
1653 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
1654 							VM_ALLOC_NULL_OK);
1655 			if (pg == NULL) {
1656 				vm_wait(0);
1657 				vm_object_drop(obj);
1658 				goto retry_lookup;
1659 			}
1660 		}
1661 		vm_page_wire(pg);
1662 		vm_object_drop(obj);
1663 
1664 		/*
1665 		 * If page is not valid for what we need, initiate I/O
1666 		 */
1667 
1668 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1669 			struct uio auio;
1670 			struct iovec aiov;
1671 			int bsize;
1672 
1673 			/*
1674 			 * Ensure that our page is still around when the I/O
1675 			 * completes.
1676 			 */
1677 			vm_page_io_start(pg);
1678 			vm_page_wakeup(pg);
1679 
1680 			/*
1681 			 * Get the page from backing store.
1682 			 */
1683 			bsize = vp->v_mount->mnt_stat.f_iosize;
1684 			auio.uio_iov = &aiov;
1685 			auio.uio_iovcnt = 1;
1686 			aiov.iov_base = 0;
1687 			aiov.iov_len = MAXBSIZE;
1688 			auio.uio_resid = MAXBSIZE;
1689 			auio.uio_offset = trunc_page(off);
1690 			auio.uio_segflg = UIO_NOCOPY;
1691 			auio.uio_rw = UIO_READ;
1692 			auio.uio_td = td;
1693 			vn_lock(vp, LK_SHARED | LK_RETRY);
1694 			error = VOP_READ(vp, &auio,
1695 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1696 				    td->td_ucred);
1697 			vn_unlock(vp);
1698 			vm_page_flag_clear(pg, PG_ZERO);
1699 			vm_page_busy_wait(pg, FALSE, "sockpg");
1700 			vm_page_io_finish(pg);
1701 			if (error) {
1702 				vm_page_unwire(pg, 0);
1703 				vm_page_wakeup(pg);
1704 				vm_page_try_to_free(pg);
1705 				ssb_unlock(&so->so_snd);
1706 				goto done;
1707 			}
1708 		}
1709 
1710 
1711 		/*
1712 		 * Get a sendfile buf. We usually wait as long as necessary,
1713 		 * but this wait can be interrupted.
1714 		 */
1715 		if ((sf = sf_buf_alloc(pg)) == NULL) {
1716 			vm_page_unwire(pg, 0);
1717 			vm_page_wakeup(pg);
1718 			vm_page_try_to_free(pg);
1719 			ssb_unlock(&so->so_snd);
1720 			error = EINTR;
1721 			goto done;
1722 		}
1723 		vm_page_wakeup(pg);
1724 
1725 		/*
1726 		 * Get an mbuf header and set it up as having external storage.
1727 		 */
1728 		MGETHDR(m, MB_WAIT, MT_DATA);
1729 		if (m == NULL) {
1730 			error = ENOBUFS;
1731 			sf_buf_free(sf);
1732 			ssb_unlock(&so->so_snd);
1733 			goto done;
1734 		}
1735 
1736 		m->m_ext.ext_free = sf_buf_mfree;
1737 		m->m_ext.ext_ref = sf_buf_ref;
1738 		m->m_ext.ext_arg = sf;
1739 		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1740 		m->m_ext.ext_size = PAGE_SIZE;
1741 		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1742 		m->m_flags |= M_EXT;
1743 		m->m_pkthdr.len = m->m_len = xfsize;
1744 		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1745 
1746 		if (mheader != NULL) {
1747 			hbytes = mheader->m_pkthdr.len;
1748 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1749 			m_cat(mheader, m);
1750 			m = mheader;
1751 			mheader = NULL;
1752 		} else
1753 			hbytes = 0;
1754 
1755 		/*
1756 		 * Add the buffer to the socket buffer chain.
1757 		 */
1758 		crit_enter();
1759 retry_space:
1760 		/*
1761 		 * Make sure that the socket is still able to take more data.
1762 		 * CANTSENDMORE being true usually means that the connection
1763 		 * was closed.  so_error is set when an error was detected on
1764 		 * a previous send.
1765 		 * The state is checked after the page mapping and buffer
1766 		 * allocation above since those operations may block and make
1767 		 * any socket checks stale. From this point forward, nothing
1768 		 * blocks before the pru_send (or more accurately, any blocking
1769 		 * results in a loop back to here to re-check).
1770 		 */
1771 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1772 			if (so->so_state & SS_CANTSENDMORE) {
1773 				error = EPIPE;
1774 			} else {
1775 				error = so->so_error;
1776 				so->so_error = 0;
1777 			}
1778 			m_freem(m);
1779 			ssb_unlock(&so->so_snd);
1780 			crit_exit();
1781 			goto done;
1782 		}
1783 		/*
1784 		 * Wait for socket space to become available. We do this just
1785 		 * after checking the connection state above in order to avoid
1786 		 * a race condition with ssb_wait().
1787 		 */
1788 		space = ssb_space_prealloc(&so->so_snd);
1789 		if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1790 			if (fp->f_flag & FNONBLOCK) {
1791 				m_freem(m);
1792 				ssb_unlock(&so->so_snd);
1793 				crit_exit();
1794 				error = EAGAIN;
1795 				goto done;
1796 			}
1797 			error = ssb_wait(&so->so_snd);
1798 			/*
1799 			 * An error from ssb_wait usually indicates that we've
1800 			 * been interrupted by a signal. If we've sent anything
1801 			 * then return bytes sent, otherwise return the error.
1802 			 */
1803 			if (error) {
1804 				m_freem(m);
1805 				ssb_unlock(&so->so_snd);
1806 				crit_exit();
1807 				goto done;
1808 			}
1809 			goto retry_space;
1810 		}
1811 
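		/*
		 * Reserve socket buffer space for the mbuf chain and hand
		 * it to the protocol, asynchronously if sendfile async
		 * mode is enabled.
		 */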
1812 		for (mp = m; mp != NULL; mp = mp->m_next)
1813 			ssb_preallocstream(&so->so_snd, mp);
1814 		if (use_sendfile_async)
1815 			error = so_pru_senda(so, 0, m, NULL, NULL, td);
1816 		else
1817 			error = so_pru_send(so, 0, m, NULL, NULL, td);
1818 
1819 		crit_exit();
1820 		if (error) {
1821 			ssb_unlock(&so->so_snd);
1822 			goto done;
1823 		}
1824 	}
1825 	if (mheader != NULL) {
1826 		*sbytes += mheader->m_pkthdr.len;
1827 
1828 		for (mp = mheader; mp != NULL; mp = mp->m_next)
1829 			ssb_preallocstream(&so->so_snd, mp);
1830 		if (use_sendfile_async)
1831 			error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1832 		else
1833 			error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1834 
1835 		mheader = NULL;
1836 	}
1837 	ssb_unlock(&so->so_snd);
1838 
1839 done:
1840 	fdrop(fp);
1841 done0:
1842 	if (mheader != NULL)
1843 		m_freem(mheader);
1844 	return (error);
1845 }
1846 
1847 /*
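 * sctp_peeloff(int sd, caddr_t name)
 *
 * Peel the SCTP association identified by 'name' off the one-to-many
 * socket 'sd' and return it on a newly allocated descriptor.
 *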
1848  * MPALMOSTSAFE
1849  */
1850 int
1851 sys_sctp_peeloff(struct sctp_peeloff_args *uap)
1852 {
1853 #ifdef SCTP
1854 	struct thread *td = curthread;
1855 	struct filedesc *fdp = td->td_proc->p_fd;
1856 	struct file *lfp = NULL;
1857 	struct file *nfp = NULL;
1858 	int error;
1859 	struct socket *head, *so;
1860 	caddr_t assoc_id;
1861 	int fd;
1862 	short fflag;		/* type must match fp->f_flag */
1863 
1864 	assoc_id = uap->name;
1865 	error = holdsock(td->td_proc->p_fd, uap->sd, &lfp);
1866 	if (error)
1867 		return (error);
1868 
1869 	crit_enter();
1870 	head = (struct socket *)lfp->f_data;
1871 	error = sctp_can_peel_off(head, assoc_id);
1872 	if (error) {
1873 		crit_exit();
1874 		goto done;
1875 	}
1876 	/*
1877 	 * At this point we know we have an assoc to pull off, so
1878 	 * we proceed to set up the fd.  This may block, but that
1879 	 * is ok.
1880 	 */
1881 
1882 	fflag = lfp->f_flag;
1883 	error = falloc(td->td_lwp, &nfp, &fd);
1884 	if (error) {
1885 		/*
1886 		 * Probably ran out of file descriptors.  Nothing has
1887 		 * been peeled off yet, so simply drop our references
1888 		 * and return the error; the association remains on the
1889 		 * one-to-many socket.
1890 		 */
1891 		crit_exit();
1892 		goto done;
1893 	}
1894 	uap->sysmsg_iresult = fd;
1895 
1896 	so = sctp_get_peeloff(head, assoc_id, &error);
1897 	if (so == NULL) {
1898 		/*
1899 		 * Either someone else peeled it off OR
1900 		 * we can't get a socket.
1901 		 */
1902 		goto noconnection;
1903 	}
1904 	soreference(so);			/* reference needed */
1905 	soclrstate(so, SS_NOFDREF | SS_COMP);	/* when clearing NOFDREF */
1906 	so->so_head = NULL;
1907 	if (head->so_sigio != NULL)
1908 		fsetown(fgetown(&head->so_sigio), &so->so_sigio);
1909 
1910 	nfp->f_type = DTYPE_SOCKET;
1911 	nfp->f_flag = fflag;
1912 	nfp->f_ops = &socketops;
1913 	nfp->f_data = so;
1914 
1915 noconnection:
1916 	/*
1917 	 * Assign the file pointer to the reserved descriptor, or clear
1918 	 * the reserved descriptor if an error occurred.
1919 	 */
1920 	if (error)
1921 		fsetfd(fdp, NULL, fd);
1922 	else
1923 		fsetfd(fdp, nfp, fd);
1924 	crit_exit();
1925 	/*
1926 	 * Release explicitly held references before returning.
1927 	 */
1928 done:
1929 	if (nfp != NULL)
1930 		fdrop(nfp);
1931 	fdrop(lfp);
1932 	return (error);
1933 #else /* SCTP */
1934 	return(EOPNOTSUPP);
1935 #endif /* SCTP */
1936 }
1937