xref: /dragonfly/sys/kern/uipc_syscalls.c (revision cad2e385)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
33  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
34  */
35 
36 #include "opt_ktrace.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/sysproto.h>
42 #include <sys/malloc.h>
43 #include <sys/filedesc.h>
44 #include <sys/event.h>
45 #include <sys/proc.h>
46 #include <sys/fcntl.h>
47 #include <sys/file.h>
48 #include <sys/filio.h>
49 #include <sys/kern_syscall.h>
50 #include <sys/mbuf.h>
51 #include <sys/protosw.h>
52 #include <sys/sfbuf.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/socketops.h>
56 #include <sys/uio.h>
57 #include <sys/vnode.h>
58 #include <sys/lock.h>
59 #include <sys/mount.h>
60 #ifdef KTRACE
61 #include <sys/ktrace.h>
62 #endif
63 #include <vm/vm.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_pageout.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_extern.h>
69 #include <sys/file2.h>
70 #include <sys/signalvar.h>
71 #include <sys/serialize.h>
72 
73 #include <sys/thread2.h>
74 #include <sys/msgport2.h>
75 #include <sys/socketvar2.h>
76 #include <net/netmsg2.h>
77 #include <vm/vm_page2.h>
78 
79 extern int use_soaccept_pred_fast;
80 extern int use_sendfile_async;
81 extern int use_soconnect_async;
82 
83 /*
84  * System call interface to the socket abstraction.
85  */
86 
87 extern	struct fileops socketops;
88 
89 /*
90  * socket_args(int domain, int type, int protocol)
91  */
92 int
93 kern_socket(int domain, int type, int protocol, int *res)
94 {
95 	struct thread *td = curthread;
96 	struct filedesc *fdp = td->td_proc->p_fd;
97 	struct socket *so;
98 	struct file *fp;
99 	int fd, error;
100 	u_int fflags = 0;
101 	int oflags = 0;
102 
103 	KKASSERT(td->td_lwp);
104 
105 	if (type & SOCK_NONBLOCK) {
106 		type &= ~SOCK_NONBLOCK;
107 		fflags |= FNONBLOCK;
108 	}
109 	if (type & SOCK_CLOEXEC) {
110 		type &= ~SOCK_CLOEXEC;
111 		oflags |= O_CLOEXEC;
112 	}
113 
114 	error = falloc(td->td_lwp, &fp, &fd);
115 	if (error)
116 		return (error);
117 	error = socreate(domain, &so, type, protocol, td);
118 	if (error) {
119 		fsetfd(fdp, NULL, fd);
120 	} else {
121 		fp->f_type = DTYPE_SOCKET;
122 		fp->f_flag = FREAD | FWRITE | fflags;
123 		fp->f_ops = &socketops;
124 		fp->f_data = so;
125 		if (oflags & O_CLOEXEC)
126 			fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
127 		if (fflags & FNONBLOCK) {
128 			int tmp = 1;
129 			fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
130 		}
131 		*res = fd;
132 		fsetfd(fdp, fp, fd);
133 	}
134 	fdrop(fp);
135 	return (error);
136 }
137 
138 /*
139  * MPALMOSTSAFE
140  */
141 int
142 sys_socket(struct socket_args *uap)
143 {
144 	int error;
145 
146 	error = kern_socket(uap->domain, uap->type, uap->protocol,
147 			    &uap->sysmsg_iresult);
148 
149 	return (error);
150 }
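
/*
 * Illustrative userland view of the flag handling above (a minimal
 * sketch, not part of this file; assumes the standard <sys/socket.h>
 * and <fcntl.h> interfaces):
 *
 *	int s = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
 *
 * After the call, fcntl(s, F_GETFL) reports O_NONBLOCK (from FNONBLOCK
 * on the file) and fcntl(s, F_GETFD) reports FD_CLOEXEC (from UF_EXCLOSE
 * on the descriptor), with no extra fcntl() round trips needed.
 */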
151 
152 int
153 kern_bind(int s, struct sockaddr *sa)
154 {
155 	struct thread *td = curthread;
156 	struct proc *p = td->td_proc;
157 	struct file *fp;
158 	int error;
159 
160 	KKASSERT(p);
161 	error = holdsock(p->p_fd, s, &fp);
162 	if (error)
163 		return (error);
164 	error = sobind((struct socket *)fp->f_data, sa, td);
165 	fdrop(fp);
166 	return (error);
167 }
168 
169 /*
170  * bind_args(int s, caddr_t name, int namelen)
171  *
172  * MPALMOSTSAFE
173  */
174 int
175 sys_bind(struct bind_args *uap)
176 {
177 	struct sockaddr *sa;
178 	int error;
179 
180 	error = getsockaddr(&sa, uap->name, uap->namelen);
181 	if (error)
182 		return (error);
183 	error = kern_bind(uap->s, sa);
184 	kfree(sa, M_SONAME);
185 
186 	return (error);
187 }
188 
189 int
190 kern_listen(int s, int backlog)
191 {
192 	struct thread *td = curthread;
193 	struct proc *p = td->td_proc;
194 	struct file *fp;
195 	int error;
196 
197 	KKASSERT(p);
198 	error = holdsock(p->p_fd, s, &fp);
199 	if (error)
200 		return (error);
201 	error = solisten((struct socket *)fp->f_data, backlog, td);
202 	fdrop(fp);
203 	return(error);
204 }
205 
206 /*
207  * listen_args(int s, int backlog)
208  *
209  * MPALMOSTSAFE
210  */
211 int
212 sys_listen(struct listen_args *uap)
213 {
214 	int error;
215 
216 	error = kern_listen(uap->s, uap->backlog);
217 	return (error);
218 }
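
/*
 * Illustrative userland setup of a listening socket using the two
 * syscalls above (a minimal sketch, not part of this file; the port is
 * an arbitrary example value and error handling is elided):
 *
 *	struct sockaddr_in sin;
 *	int s = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(8080);
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(s, 128);
 */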
219 
220 /*
221  * Returns the accepted socket as well.
222  *
223  * NOTE!  The sockets sitting on so_comp/so_incomp might have 0 refs; the
224  *	  pool token is absolutely required to avoid a sofree() race,
225  *	  as well as to avoid tailq handling races.
226  */
227 static boolean_t
228 soaccept_predicate(struct netmsg_so_notify *msg)
229 {
230 	struct socket *head = msg->base.nm_so;
231 	struct socket *so;
232 
233 	if (head->so_error != 0) {
234 		msg->base.lmsg.ms_error = head->so_error;
235 		return (TRUE);
236 	}
237 	lwkt_getpooltoken(head);
238 	if (!TAILQ_EMPTY(&head->so_comp)) {
239 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
240 		so = TAILQ_FIRST(&head->so_comp);
241 		KKASSERT((so->so_state & (SS_INCOMP | SS_COMP)) == SS_COMP);
242 		TAILQ_REMOVE(&head->so_comp, so, so_list);
243 		head->so_qlen--;
244 		soclrstate(so, SS_COMP);
245 
246 		/*
247 		 * Keep a reference before clearing the so_head
248 		 * to avoid racing socket close in netisr.
249 		 */
250 		soreference(so);
251 		so->so_head = NULL;
252 
253 		lwkt_relpooltoken(head);
254 
255 		msg->base.lmsg.ms_error = 0;
256 		msg->base.nm_so = so;
257 		return (TRUE);
258 	}
259 	lwkt_relpooltoken(head);
260 	if (head->so_state & SS_CANTRCVMORE) {
261 		msg->base.lmsg.ms_error = ECONNABORTED;
262 		return (TRUE);
263 	}
264 	if (msg->nm_fflags & FNONBLOCK) {
265 		msg->base.lmsg.ms_error = EWOULDBLOCK;
266 		return (TRUE);
267 	}
268 
269 	return (FALSE);
270 }
271 
272 /*
273  * The second argument to kern_accept() is a handle to a struct sockaddr.
274  * This allows kern_accept() to return a pointer to an allocated struct
275  * sockaddr which must be freed later with kfree().  The caller must
276  * initialize *name to NULL.
277  */
278 int
279 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res,
280     int sockflags)
281 {
282 	struct thread *td = curthread;
283 	struct filedesc *fdp = td->td_proc->p_fd;
284 	struct file *lfp = NULL;
285 	struct file *nfp = NULL;
286 	struct sockaddr *sa;
287 	struct socket *head, *so;
288 	struct netmsg_so_notify msg;
289 	int fd;
290 	u_int fflag;		/* type must match fp->f_flag */
291 	int error, tmp;
292 
293 	*res = -1;
294 	if (name && namelen && *namelen < 0)
295 		return (EINVAL);
296 
297 	error = holdsock(td->td_proc->p_fd, s, &lfp);
298 	if (error)
299 		return (error);
300 
301 	error = falloc(td->td_lwp, &nfp, &fd);
302 	if (error) {		/* Probably ran out of file descriptors. */
303 		fdrop(lfp);
304 		return (error);
305 	}
306 	head = (struct socket *)lfp->f_data;
307 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
308 		error = EINVAL;
309 		goto done;
310 	}
311 
312 	if (fflags & O_FBLOCKING)
313 		fflags |= lfp->f_flag & ~FNONBLOCK;
314 	else if (fflags & O_FNONBLOCKING)
315 		fflags |= lfp->f_flag | FNONBLOCK;
316 	else
317 		fflags = lfp->f_flag;
318 
319 	if (use_soaccept_pred_fast) {
320 		boolean_t pred;
321 
322 		/* Initialize necessary parts for soaccept_predicate() */
323 		netmsg_init(&msg.base, head, &netisr_apanic_rport, 0, NULL);
324 		msg.nm_fflags = fflags;
325 
326 		lwkt_getpooltoken(head);
327 		pred = soaccept_predicate(&msg);
328 		lwkt_relpooltoken(head);
329 
330 		if (pred) {
331 			error = msg.base.lmsg.ms_error;
332 			if (error)
333 				goto done;
334 			else
335 				goto accepted;
336 		}
337 	}
338 
339 	/* optimize for uniprocessor case later XXX JH */
340 	netmsg_init_abortable(&msg.base, head, &curthread->td_msgport,
341 			      0, netmsg_so_notify, netmsg_so_notify_doabort);
342 	msg.nm_predicate = soaccept_predicate;
343 	msg.nm_fflags = fflags;
344 	msg.nm_etype = NM_REVENT;
345 	error = lwkt_domsg(head->so_port, &msg.base.lmsg, PCATCH);
346 	if (error)
347 		goto done;
348 
349 accepted:
350 	/*
351 	 * At this point we have the connection that's ready to be accepted.
352 	 *
353 	 * NOTE! soaccept_predicate() ref'd so for us, and soaccept() expects
354 	 * 	 to eat the ref and turn it into a descriptor.
355 	 */
356 	so = msg.base.nm_so;
357 
358 	fflag = lfp->f_flag;
359 
360 	/* connection has been removed from the listen queue */
361 	KNOTE(&head->so_rcv.ssb_kq.ki_note, 0);
362 
363 	if (sockflags & SOCK_KERN_NOINHERIT) {
364 		fflag &= ~(FASYNC | FNONBLOCK);
365 		if (sockflags & SOCK_NONBLOCK)
366 			fflag |= FNONBLOCK;
367 	} else {
368 		if (head->so_sigio != NULL)
369 			fsetown(fgetown(&head->so_sigio), &so->so_sigio);
370 	}
371 
372 	nfp->f_type = DTYPE_SOCKET;
373 	nfp->f_flag = fflag;
374 	nfp->f_ops = &socketops;
375 	nfp->f_data = so;
376 	/* Sync socket nonblocking/async state with file flags */
377 	tmp = fflag & FNONBLOCK;
378 	fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
379 	tmp = fflag & FASYNC;
380 	fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, td->td_ucred, NULL);
381 
382 	sa = NULL;
383 	if (so->so_faddr != NULL) {
384 		sa = so->so_faddr;
385 		so->so_faddr = NULL;
386 
387 		soaccept_generic(so);
388 		error = 0;
389 	} else {
390 		error = soaccept(so, &sa);
391 	}
392 
393 	/*
394 	 * Set the returned name and namelen as applicable.  Set the returned
395 	 * namelen to 0 for older code which might ignore the return value
396 	 * from accept.
397 	 */
398 	if (error == 0) {
399 		if (sa && name && namelen) {
400 			if (*namelen > sa->sa_len)
401 				*namelen = sa->sa_len;
402 			*name = sa;
403 		} else {
404 			if (sa)
405 				kfree(sa, M_SONAME);
406 		}
407 	}
408 
409 done:
410 	/*
411 	 * If an error occurred, clear the reserved descriptor, else associate
412 	 * nfp with it.
413 	 *
414 	 * Note that *res is normally ignored if an error is returned but
415 	 * a syscall message will still have access to the result code.
416 	 */
417 	if (error) {
418 		fsetfd(fdp, NULL, fd);
419 	} else {
420 		if (sockflags & SOCK_CLOEXEC)
421 			fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
422 		*res = fd;
423 		fsetfd(fdp, nfp, fd);
424 	}
425 	fdrop(nfp);
426 	fdrop(lfp);
427 	return (error);
428 }
429 
430 /*
431  * accept(int s, caddr_t name, int *anamelen)
432  *
433  * MPALMOSTSAFE
434  */
435 int
436 sys_accept(struct accept_args *uap)
437 {
438 	struct sockaddr *sa = NULL;
439 	int sa_len;
440 	int error;
441 
442 	if (uap->name) {
443 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
444 		if (error)
445 			return (error);
446 
447 		error = kern_accept(uap->s, 0, &sa, &sa_len,
448 				    &uap->sysmsg_iresult, 0);
449 
450 		if (error == 0)
451 			error = copyout(sa, uap->name, sa_len);
452 		if (error == 0) {
453 			error = copyout(&sa_len, uap->anamelen,
454 			    sizeof(*uap->anamelen));
455 		}
456 		if (sa)
457 			kfree(sa, M_SONAME);
458 	} else {
459 		error = kern_accept(uap->s, 0, NULL, 0,
460 				    &uap->sysmsg_iresult, 0);
461 	}
462 	return (error);
463 }
464 
465 /*
466  * extaccept(int s, int fflags, caddr_t name, int *anamelen)
467  *
468  * MPALMOSTSAFE
469  */
470 int
471 sys_extaccept(struct extaccept_args *uap)
472 {
473 	struct sockaddr *sa = NULL;
474 	int sa_len;
475 	int error;
476 	int fflags = uap->flags & O_FMASK;
477 
478 	if (uap->name) {
479 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
480 		if (error)
481 			return (error);
482 
483 		error = kern_accept(uap->s, fflags, &sa, &sa_len,
484 				    &uap->sysmsg_iresult, 0);
485 
486 		if (error == 0)
487 			error = copyout(sa, uap->name, sa_len);
488 		if (error == 0) {
489 			error = copyout(&sa_len, uap->anamelen,
490 			    sizeof(*uap->anamelen));
491 		}
492 		if (sa)
493 			kfree(sa, M_SONAME);
494 	} else {
495 		error = kern_accept(uap->s, fflags, NULL, 0,
496 				    &uap->sysmsg_iresult, 0);
497 	}
498 	return (error);
499 }
500 
501 /*
502  * accept4(int s, caddr_t name, int *anamelen, int flags)
503  *
504  * MPALMOSTSAFE
505  */
506 int
507 sys_accept4(struct accept4_args *uap)
508 {
509 	struct sockaddr *sa = NULL;
510 	int sa_len;
511 	int error;
512 	int sockflags;
513 
514 	if (uap->flags & ~(SOCK_NONBLOCK | SOCK_CLOEXEC))
515 		return (EINVAL);
516 	sockflags = uap->flags | SOCK_KERN_NOINHERIT;
517 
518 	if (uap->name) {
519 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
520 		if (error)
521 			return (error);
522 
523 		error = kern_accept(uap->s, 0, &sa, &sa_len,
524 				    &uap->sysmsg_iresult, sockflags);
525 
526 		if (error == 0)
527 			error = copyout(sa, uap->name, sa_len);
528 		if (error == 0) {
529 			error = copyout(&sa_len, uap->anamelen,
530 			    sizeof(*uap->anamelen));
531 		}
532 		if (sa)
533 			kfree(sa, M_SONAME);
534 	} else {
535 		error = kern_accept(uap->s, 0, NULL, 0,
536 				    &uap->sysmsg_iresult, sockflags);
537 	}
538 	return (error);
539 }
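
/*
 * Illustrative userland use of accept4() as implemented above (a minimal
 * sketch, not part of this file).  Because sys_accept4() passes
 * SOCK_KERN_NOINHERIT, the new descriptor does not inherit FNONBLOCK or
 * FASYNC from the listening socket; only the flags given here apply.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t slen = sizeof(ss);
 *	int c;
 *
 *	c = accept4(s, (struct sockaddr *)&ss, &slen,
 *		    SOCK_NONBLOCK | SOCK_CLOEXEC);
 *
 * EWOULDBLOCK (non-blocking listen socket with an empty queue) and
 * ECONNABORTED (listen socket shut down) come straight from
 * soaccept_predicate() above.
 */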
540 
541 /*
542  * Returns TRUE if predicate satisfied.
543  */
544 static boolean_t
545 soconnected_predicate(struct netmsg_so_notify *msg)
546 {
547 	struct socket *so = msg->base.nm_so;
548 
549 	/* check predicate */
550 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
551 		msg->base.lmsg.ms_error = so->so_error;
552 		return (TRUE);
553 	}
554 
555 	return (FALSE);
556 }
557 
558 int
559 kern_connect(int s, int fflags, struct sockaddr *sa)
560 {
561 	struct thread *td = curthread;
562 	struct proc *p = td->td_proc;
563 	struct file *fp;
564 	struct socket *so;
565 	int error, interrupted = 0;
566 
567 	error = holdsock(p->p_fd, s, &fp);
568 	if (error)
569 		return (error);
570 	so = (struct socket *)fp->f_data;
571 
572 	if (fflags & O_FBLOCKING)
573 		/* fflags &= ~FNONBLOCK; */;
574 	else if (fflags & O_FNONBLOCKING)
575 		fflags |= FNONBLOCK;
576 	else
577 		fflags = fp->f_flag;
578 
579 	if (so->so_state & SS_ISCONNECTING) {
580 		error = EALREADY;
581 		goto done;
582 	}
583 	error = soconnect(so, sa, td, use_soconnect_async ? FALSE : TRUE);
584 	if (error)
585 		goto bad;
586 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
587 		error = EINPROGRESS;
588 		goto done;
589 	}
590 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
591 		struct netmsg_so_notify msg;
592 
593 		netmsg_init_abortable(&msg.base, so,
594 				      &curthread->td_msgport,
595 				      0,
596 				      netmsg_so_notify,
597 				      netmsg_so_notify_doabort);
598 		msg.nm_predicate = soconnected_predicate;
599 		msg.nm_etype = NM_REVENT;
600 		error = lwkt_domsg(so->so_port, &msg.base.lmsg, PCATCH);
601 		if (error == EINTR || error == ERESTART)
602 			interrupted = 1;
603 	}
604 	if (error == 0) {
605 		error = so->so_error;
606 		so->so_error = 0;
607 	}
608 bad:
609 	if (!interrupted)
610 		soclrstate(so, SS_ISCONNECTING);
611 	if (error == ERESTART)
612 		error = EINTR;
613 done:
614 	fdrop(fp);
615 	return (error);
616 }
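
/*
 * Illustrative userland handling of the EINPROGRESS path above (a
 * minimal sketch, not part of this file; assumes a non-blocking stream
 * socket s and a previously filled struct sockaddr_in sin):
 *
 *	if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = s, .events = POLLOUT };
 *		int soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		poll(&pfd, 1, -1);
 *		getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len);
 *	}
 *
 * A non-zero soerr is the deferred connection error that kern_connect()
 * would have returned for a blocking connect.
 */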
617 
618 /*
619  * connect_args(int s, caddr_t name, int namelen)
620  *
621  * MPALMOSTSAFE
622  */
623 int
624 sys_connect(struct connect_args *uap)
625 {
626 	struct sockaddr *sa;
627 	int error;
628 
629 	error = getsockaddr(&sa, uap->name, uap->namelen);
630 	if (error)
631 		return (error);
632 	error = kern_connect(uap->s, 0, sa);
633 	kfree(sa, M_SONAME);
634 
635 	return (error);
636 }
637 
638 /*
639  * extconnect_args(int s, int fflags, caddr_t name, int namelen)
640  *
641  * MPALMOSTSAFE
642  */
643 int
644 sys_extconnect(struct extconnect_args *uap)
645 {
646 	struct sockaddr *sa;
647 	int error;
648 	int fflags = uap->flags & O_FMASK;
649 
650 	error = getsockaddr(&sa, uap->name, uap->namelen);
651 	if (error)
652 		return (error);
653 	error = kern_connect(uap->s, fflags, sa);
654 	kfree(sa, M_SONAME);
655 
656 	return (error);
657 }
658 
659 int
660 kern_socketpair(int domain, int type, int protocol, int *sv)
661 {
662 	struct thread *td = curthread;
663 	struct filedesc *fdp;
664 	struct file *fp1, *fp2;
665 	struct socket *so1, *so2;
666 	int fd1, fd2, error;
667 	u_int fflags = 0;
668 	int oflags = 0;
669 
670 	if (type & SOCK_NONBLOCK) {
671 		type &= ~SOCK_NONBLOCK;
672 		fflags |= FNONBLOCK;
673 	}
674 	if (type & SOCK_CLOEXEC) {
675 		type &= ~SOCK_CLOEXEC;
676 		oflags |= O_CLOEXEC;
677 	}
678 
679 	fdp = td->td_proc->p_fd;
680 	error = socreate(domain, &so1, type, protocol, td);
681 	if (error)
682 		return (error);
683 	error = socreate(domain, &so2, type, protocol, td);
684 	if (error)
685 		goto free1;
686 	error = falloc(td->td_lwp, &fp1, &fd1);
687 	if (error)
688 		goto free2;
689 	sv[0] = fd1;
690 	fp1->f_data = so1;
691 	error = falloc(td->td_lwp, &fp2, &fd2);
692 	if (error)
693 		goto free3;
694 	fp2->f_data = so2;
695 	sv[1] = fd2;
696 	error = soconnect2(so1, so2);
697 	if (error)
698 		goto free4;
699 	if (type == SOCK_DGRAM) {
700 		/*
701 		 * Datagram socket connection is asymmetric.
702 		 */
703 		 error = soconnect2(so2, so1);
704 		 if (error)
705 			goto free4;
706 	}
707 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
708 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE|fflags;
709 	fp1->f_ops = fp2->f_ops = &socketops;
710 	if (oflags & O_CLOEXEC) {
711 		fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
712 		fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
713 	}
714 	if (fflags & FNONBLOCK) {
715 		int tmp;
716 
717 		tmp = 1;
718 		fo_ioctl(fp1, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
719 		tmp = 1;
720 		fo_ioctl(fp2, FIONBIO, (caddr_t)&tmp, td->td_ucred, NULL);
721 	}
722 	fsetfd(fdp, fp1, fd1);
723 	fsetfd(fdp, fp2, fd2);
724 	fdrop(fp1);
725 	fdrop(fp2);
726 	return (error);
727 free4:
728 	fsetfd(fdp, NULL, fd2);
729 	fdrop(fp2);
730 free3:
731 	fsetfd(fdp, NULL, fd1);
732 	fdrop(fp1);
733 free2:
734 	(void)soclose(so2, 0);
735 free1:
736 	(void)soclose(so1, 0);
737 	return (error);
738 }
739 
740 /*
741  * socketpair(int domain, int type, int protocol, int *rsv)
742  */
743 int
744 sys_socketpair(struct socketpair_args *uap)
745 {
746 	int error, sockv[2];
747 
748 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
749 
750 	if (error == 0) {
751 		error = copyout(sockv, uap->rsv, sizeof(sockv));
752 
753 		if (error != 0) {
754 			kern_close(sockv[0]);
755 			kern_close(sockv[1]);
756 		}
757 	}
758 
759 	return (error);
760 }
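
/*
 * Illustrative userland use of socketpair() (a minimal sketch, not part
 * of this file): both descriptors come back connected to each other, so
 * data written on one end is readable on the other.
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, sizeof(buf));
 *	}
 */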
761 
762 int
763 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
764 	     struct mbuf *control, int flags, size_t *res)
765 {
766 	struct thread *td = curthread;
767 	struct lwp *lp = td->td_lwp;
768 	struct proc *p = td->td_proc;
769 	struct file *fp;
770 	size_t len;
771 	int error;
772 	struct socket *so;
773 #ifdef KTRACE
774 	struct iovec *ktriov = NULL;
775 	struct uio ktruio;
776 #endif
777 
778 	error = holdsock(p->p_fd, s, &fp);
779 	if (error)
780 		return (error);
781 #ifdef KTRACE
782 	if (KTRPOINT(td, KTR_GENIO)) {
783 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
784 
785 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
786 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
787 		ktruio = *auio;
788 	}
789 #endif
790 	len = auio->uio_resid;
791 	so = (struct socket *)fp->f_data;
792 	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
793 		if (fp->f_flag & FNONBLOCK)
794 			flags |= MSG_FNONBLOCKING;
795 	}
796 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
797 	if (error) {
798 		if (auio->uio_resid != len && (error == ERESTART ||
799 		    error == EINTR || error == EWOULDBLOCK))
800 			error = 0;
801 		if (error == EPIPE && !(flags & MSG_NOSIGNAL) &&
802 		    !(so->so_options & SO_NOSIGPIPE))
803 			lwpsignal(p, lp, SIGPIPE);
804 	}
805 #ifdef KTRACE
806 	if (ktriov != NULL) {
807 		if (error == 0) {
808 			ktruio.uio_iov = ktriov;
809 			ktruio.uio_resid = len - auio->uio_resid;
810 			ktrgenio(lp, s, UIO_WRITE, &ktruio, error);
811 		}
812 		kfree(ktriov, M_TEMP);
813 	}
814 #endif
815 	if (error == 0)
816 		*res  = len - auio->uio_resid;
817 	fdrop(fp);
818 	return (error);
819 }
820 
821 /*
822  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
823  *
824  * MPALMOSTSAFE
825  */
826 int
827 sys_sendto(struct sendto_args *uap)
828 {
829 	struct thread *td = curthread;
830 	struct uio auio;
831 	struct iovec aiov;
832 	struct sockaddr *sa = NULL;
833 	int error;
834 
835 	if (uap->to) {
836 		error = getsockaddr(&sa, uap->to, uap->tolen);
837 		if (error)
838 			return (error);
839 	}
840 	aiov.iov_base = uap->buf;
841 	aiov.iov_len = uap->len;
842 	auio.uio_iov = &aiov;
843 	auio.uio_iovcnt = 1;
844 	auio.uio_offset = 0;
845 	auio.uio_resid = uap->len;
846 	auio.uio_segflg = UIO_USERSPACE;
847 	auio.uio_rw = UIO_WRITE;
848 	auio.uio_td = td;
849 
850 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
851 			     &uap->sysmsg_szresult);
852 
853 	if (sa)
854 		kfree(sa, M_SONAME);
855 	return (error);
856 }
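
/*
 * Illustrative userland datagram send through the path above (a minimal
 * sketch, not part of this file; the destination address and port are
 * arbitrary example values):
 *
 *	struct sockaddr_in dst;
 *	char payload[] = "hello";
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.sin_len = sizeof(dst);
 *	dst.sin_family = AF_INET;
 *	dst.sin_port = htons(9999);
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	sendto(s, payload, sizeof(payload) - 1, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */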
857 
858 /*
859  * sendmsg_args(int s, caddr_t msg, int flags)
860  *
861  * MPALMOSTSAFE
862  */
863 int
864 sys_sendmsg(struct sendmsg_args *uap)
865 {
866 	struct thread *td = curthread;
867 	struct msghdr msg;
868 	struct uio auio;
869 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
870 	struct sockaddr *sa = NULL;
871 	struct mbuf *control = NULL;
872 	int error;
873 
874 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
875 	if (error)
876 		return (error);
877 
878 	/*
879 	 * Conditionally copyin msg.msg_name.
880 	 */
881 	if (msg.msg_name) {
882 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
883 		if (error)
884 			return (error);
885 	}
886 
887 	/*
888 	 * Populate auio.
889 	 */
890 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
891 			     &auio.uio_resid);
892 	if (error)
893 		goto cleanup2;
894 	auio.uio_iov = iov;
895 	auio.uio_iovcnt = msg.msg_iovlen;
896 	auio.uio_offset = 0;
897 	auio.uio_segflg = UIO_USERSPACE;
898 	auio.uio_rw = UIO_WRITE;
899 	auio.uio_td = td;
900 
901 	/*
902 	 * Conditionally copyin msg.msg_control.
903 	 */
904 	if (msg.msg_control) {
905 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
906 		    msg.msg_controllen > MLEN) {
907 			error = EINVAL;
908 			goto cleanup;
909 		}
910 		control = m_get(M_WAITOK, MT_CONTROL);
911 		if (control == NULL) {
912 			error = ENOBUFS;
913 			goto cleanup;
914 		}
915 		control->m_len = msg.msg_controllen;
916 		error = copyin(msg.msg_control, mtod(control, caddr_t),
917 			       msg.msg_controllen);
918 		if (error) {
919 			m_free(control);
920 			goto cleanup;
921 		}
922 	}
923 
924 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
925 			     &uap->sysmsg_szresult);
926 
927 cleanup:
928 	iovec_free(&iov, aiov);
929 cleanup2:
930 	if (sa)
931 		kfree(sa, M_SONAME);
932 	return (error);
933 }
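
/*
 * Illustrative userland sendmsg() passing a descriptor via SCM_RIGHTS (a
 * minimal sketch, not part of this file; s and fd_to_pass are assumed to
 * exist).  Note that the code above rejects control data larger than one
 * mbuf (MLEN), which is far more than CMSG_SPACE(sizeof(int)).
 *
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } cm;
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr m;
 *	struct cmsghdr *cmsg;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.msg_iov = &iov;
 *	m.msg_iovlen = 1;
 *	m.msg_control = cm.buf;
 *	m.msg_controllen = sizeof(cm.buf);
 *	cmsg = CMSG_FIRSTHDR(&m);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &m, 0);
 */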
934 
935 /*
936  * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
937  * null, it returns a dynamically allocated struct sockaddr and an mbuf.
938  * Don't forget to FREE() and m_free() these if they are returned.
939  */
940 int
941 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
942 	     struct mbuf **control, int *flags, size_t *res)
943 {
944 	struct thread *td = curthread;
945 	struct proc *p = td->td_proc;
946 	struct file *fp;
947 	size_t len;
948 	int error;
949 	int lflags;
950 	struct socket *so;
951 #ifdef KTRACE
952 	struct iovec *ktriov = NULL;
953 	struct uio ktruio;
954 #endif
955 
956 	error = holdsock(p->p_fd, s, &fp);
957 	if (error)
958 		return (error);
959 #ifdef KTRACE
960 	if (KTRPOINT(td, KTR_GENIO)) {
961 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
962 
963 		ktriov = kmalloc(iovlen, M_TEMP, M_WAITOK);
964 		bcopy(auio->uio_iov, ktriov, iovlen);
965 		ktruio = *auio;
966 	}
967 #endif
968 	len = auio->uio_resid;
969 	so = (struct socket *)fp->f_data;
970 
971 	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
972 		if (fp->f_flag & FNONBLOCK) {
973 			if (flags) {
974 				*flags |= MSG_FNONBLOCKING;
975 			} else {
976 				lflags = MSG_FNONBLOCKING;
977 				flags = &lflags;
978 			}
979 		}
980 	}
981 
982 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
983 	if (error) {
984 		if (auio->uio_resid != len && (error == ERESTART ||
985 		    error == EINTR || error == EWOULDBLOCK))
986 			error = 0;
987 	}
988 #ifdef KTRACE
989 	if (ktriov != NULL) {
990 		if (error == 0) {
991 			ktruio.uio_iov = ktriov;
992 			ktruio.uio_resid = len - auio->uio_resid;
993 			ktrgenio(td->td_lwp, s, UIO_READ, &ktruio, error);
994 		}
995 		kfree(ktriov, M_TEMP);
996 	}
997 #endif
998 	if (error == 0)
999 		*res = len - auio->uio_resid;
1000 	fdrop(fp);
1001 	return (error);
1002 }
1003 
1004 /*
1005  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
1006  *			caddr_t from, int *fromlenaddr)
1007  *
1008  * MPALMOSTSAFE
1009  */
1010 int
1011 sys_recvfrom(struct recvfrom_args *uap)
1012 {
1013 	struct thread *td = curthread;
1014 	struct uio auio;
1015 	struct iovec aiov;
1016 	struct sockaddr *sa = NULL;
1017 	int error, fromlen;
1018 
1019 	if (uap->from && uap->fromlenaddr) {
1020 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
1021 		if (error)
1022 			return (error);
1023 		if (fromlen < 0)
1024 			return (EINVAL);
1025 	} else {
1026 		fromlen = 0;
1027 	}
1028 	aiov.iov_base = uap->buf;
1029 	aiov.iov_len = uap->len;
1030 	auio.uio_iov = &aiov;
1031 	auio.uio_iovcnt = 1;
1032 	auio.uio_offset = 0;
1033 	auio.uio_resid = uap->len;
1034 	auio.uio_segflg = UIO_USERSPACE;
1035 	auio.uio_rw = UIO_READ;
1036 	auio.uio_td = td;
1037 
1038 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
1039 			     &uap->flags, &uap->sysmsg_szresult);
1040 
1041 	if (error == 0 && uap->from) {
1042 		/* note: sa may still be NULL */
1043 		if (sa) {
1044 			fromlen = MIN(fromlen, sa->sa_len);
1045 			error = copyout(sa, uap->from, fromlen);
1046 		} else {
1047 			fromlen = 0;
1048 		}
1049 		if (error == 0) {
1050 			error = copyout(&fromlen, uap->fromlenaddr,
1051 					sizeof(fromlen));
1052 		}
1053 	}
1054 	if (sa)
1055 		kfree(sa, M_SONAME);
1056 
1057 	return (error);
1058 }
1059 
1060 /*
1061  * recvmsg_args(int s, struct msghdr *msg, int flags)
1062  *
1063  * MPALMOSTSAFE
1064  */
1065 int
1066 sys_recvmsg(struct recvmsg_args *uap)
1067 {
1068 	struct thread *td = curthread;
1069 	struct msghdr msg;
1070 	struct uio auio;
1071 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1072 	struct mbuf *m, *control = NULL;
1073 	struct sockaddr *sa = NULL;
1074 	caddr_t ctlbuf;
1075 	socklen_t *ufromlenp, *ucontrollenp;
1076 	int error, fromlen, controllen, len, flags, *uflagsp;
1077 
1078 	/*
1079 	 * This copyin handles everything except the iovec.
1080 	 */
1081 	error = copyin(uap->msg, &msg, sizeof(msg));
1082 	if (error)
1083 		return (error);
1084 
1085 	if (msg.msg_name && msg.msg_namelen < 0)
1086 		return (EINVAL);
1087 	if (msg.msg_control && msg.msg_controllen < 0)
1088 		return (EINVAL);
1089 
1090 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1091 		    msg_namelen));
1092 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
1093 		       msg_controllen));
1094 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
1095 							msg_flags));
1096 
1097 	/*
1098 	 * Populate auio.
1099 	 */
1100 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
1101 			     &auio.uio_resid);
1102 	if (error)
1103 		return (error);
1104 	auio.uio_iov = iov;
1105 	auio.uio_iovcnt = msg.msg_iovlen;
1106 	auio.uio_offset = 0;
1107 	auio.uio_segflg = UIO_USERSPACE;
1108 	auio.uio_rw = UIO_READ;
1109 	auio.uio_td = td;
1110 
1111 	flags = uap->flags;
1112 
1113 	error = kern_recvmsg(uap->s,
1114 			     (msg.msg_name ? &sa : NULL), &auio,
1115 			     (msg.msg_control ? &control : NULL), &flags,
1116 			     &uap->sysmsg_szresult);
1117 
1118 	/*
1119 	 * Conditionally copyout the name and populate the namelen field.
1120 	 */
1121 	if (error == 0 && msg.msg_name) {
1122 		/* note: sa may still be NULL */
1123 		if (sa != NULL) {
1124 			fromlen = MIN(msg.msg_namelen, sa->sa_len);
1125 			error = copyout(sa, msg.msg_name, fromlen);
1126 		} else {
1127 			fromlen = 0;
1128 		}
1129 		if (error == 0)
1130 			error = copyout(&fromlen, ufromlenp,
1131 			    sizeof(*ufromlenp));
1132 	}
1133 
1134 	/*
1135 	 * Copyout msg.msg_control and msg.msg_controllen.
1136 	 */
1137 	if (error == 0 && msg.msg_control) {
1138 		len = msg.msg_controllen;
1139 		m = control;
1140 		ctlbuf = (caddr_t)msg.msg_control;
1141 
1142 		while(m && len > 0) {
1143 			unsigned int tocopy;
1144 
1145 			if (len >= m->m_len) {
1146 				tocopy = m->m_len;
1147 			} else {
1148 				msg.msg_flags |= MSG_CTRUNC;
1149 				tocopy = len;
1150 			}
1151 
1152 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1153 			if (error)
1154 				goto cleanup;
1155 
1156 			ctlbuf += tocopy;
1157 			len -= tocopy;
1158 			m = m->m_next;
1159 		}
1160 		controllen = ctlbuf - (caddr_t)msg.msg_control;
1161 		error = copyout(&controllen, ucontrollenp,
1162 		    sizeof(*ucontrollenp));
1163 	}
1164 
1165 	if (error == 0)
1166 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1167 
1168 cleanup:
1169 	if (sa)
1170 		kfree(sa, M_SONAME);
1171 	iovec_free(&iov, aiov);
1172 	if (control)
1173 		m_freem(control);
1174 	return (error);
1175 }
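
/*
 * Illustrative userland recvmsg() receiving ancillary data such as the
 * SCM_RIGHTS message sent above (a minimal sketch, not part of this
 * file; s and newfd are assumed to exist).  The MSG_CTRUNC flag, set by
 * the control copyout loop above, indicates the supplied control buffer
 * was too small.
 *
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } cm;
 *	char data[128];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr m;
 *	struct cmsghdr *cmsg;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.msg_iov = &iov;
 *	m.msg_iovlen = 1;
 *	m.msg_control = cm.buf;
 *	m.msg_controllen = sizeof(cm.buf);
 *	if (recvmsg(s, &m, 0) >= 0 && !(m.msg_flags & MSG_CTRUNC)) {
 *		for (cmsg = CMSG_FIRSTHDR(&m); cmsg != NULL;
 *		     cmsg = CMSG_NXTHDR(&m, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_RIGHTS)
 *				memcpy(&newfd, CMSG_DATA(cmsg), sizeof(int));
 *		}
 *	}
 */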
1176 
1177 /*
1178  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1179  * in-kernel pointer instead of a userland pointer.  This allows us
1180  * to manipulate socket options in the emulation code.
1181  */
1182 int
1183 kern_setsockopt(int s, struct sockopt *sopt)
1184 {
1185 	struct thread *td = curthread;
1186 	struct proc *p = td->td_proc;
1187 	struct file *fp;
1188 	int error;
1189 
1190 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1191 		return (EFAULT);
1192 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1193 		return (EINVAL);
1194 	if (sopt->sopt_valsize > SOMAXOPT_SIZE)	/* unsigned */
1195 		return (EINVAL);
1196 
1197 	error = holdsock(p->p_fd, s, &fp);
1198 	if (error)
1199 		return (error);
1200 
1201 	error = sosetopt((struct socket *)fp->f_data, sopt);
1202 	fdrop(fp);
1203 	return (error);
1204 }
1205 
1206 /*
1207  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1208  *
1209  * MPALMOSTSAFE
1210  */
1211 int
1212 sys_setsockopt(struct setsockopt_args *uap)
1213 {
1214 	struct thread *td = curthread;
1215 	struct sockopt sopt;
1216 	int error;
1217 
1218 	sopt.sopt_level = uap->level;
1219 	sopt.sopt_name = uap->name;
1220 	sopt.sopt_valsize = uap->valsize;
1221 	sopt.sopt_td = td;
1222 	sopt.sopt_val = NULL;
1223 
1224 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1225 		return (EINVAL);
1226 	if (uap->val) {
1227 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1228 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1229 		if (error)
1230 			goto out;
1231 	}
1232 
1233 	error = kern_setsockopt(uap->s, &sopt);
1234 out:
1235 	if (uap->val)
1236 		kfree(sopt.sopt_val, M_TEMP);
1237 	return(error);
1238 }
1239 
1240 /*
1241  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1242  * in-kernel pointer instead of a userland pointer.  This allows us
1243  * to manipulate socket options in the emulation code.
1244  */
1245 int
1246 kern_getsockopt(int s, struct sockopt *sopt)
1247 {
1248 	struct thread *td = curthread;
1249 	struct proc *p = td->td_proc;
1250 	struct file *fp;
1251 	int error;
1252 
1253 	if (sopt->sopt_val == NULL && sopt->sopt_valsize != 0)
1254 		return (EFAULT);
1255 	if (sopt->sopt_val != NULL && sopt->sopt_valsize == 0)
1256 		return (EINVAL);
1257 	if (sopt->sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1258 		return (EINVAL);
1259 
1260 	error = holdsock(p->p_fd, s, &fp);
1261 	if (error)
1262 		return (error);
1263 
1264 	error = sogetopt((struct socket *)fp->f_data, sopt);
1265 	fdrop(fp);
1266 	return (error);
1267 }
1268 
1269 /*
1270  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1271  *
1272  * MPALMOSTSAFE
1273  */
1274 int
1275 sys_getsockopt(struct getsockopt_args *uap)
1276 {
1277 	struct thread *td = curthread;
1278 	struct	sockopt sopt;
1279 	int	error, valsize;
1280 
1281 	if (uap->val) {
1282 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1283 		if (error)
1284 			return (error);
1285 	} else {
1286 		valsize = 0;
1287 	}
1288 
1289 	sopt.sopt_level = uap->level;
1290 	sopt.sopt_name = uap->name;
1291 	sopt.sopt_valsize = valsize;
1292 	sopt.sopt_td = td;
1293 	sopt.sopt_val = NULL;
1294 
1295 	if (sopt.sopt_valsize > SOMAXOPT_SIZE) /* unsigned */
1296 		return (EINVAL);
1297 	if (uap->val) {
1298 		sopt.sopt_val = kmalloc(sopt.sopt_valsize, M_TEMP, M_WAITOK);
1299 		error = copyin(uap->val, sopt.sopt_val, sopt.sopt_valsize);
1300 		if (error)
1301 			goto out;
1302 	}
1303 
1304 	error = kern_getsockopt(uap->s, &sopt);
1305 	if (error)
1306 		goto out;
1307 	valsize = sopt.sopt_valsize;
1308 	error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1309 	if (error)
1310 		goto out;
1311 	if (uap->val)
1312 		error = copyout(sopt.sopt_val, uap->val, sopt.sopt_valsize);
1313 out:
1314 	if (uap->val)
1315 		kfree(sopt.sopt_val, M_TEMP);
1316 	return (error);
1317 }
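
/*
 * Illustrative userland use of the two option paths above (a minimal
 * sketch, not part of this file; both directions are bounded by
 * SOMAXOPT_SIZE in the kernel routines):
 *
 *	int on = 1, rcvbuf;
 *	socklen_t len = sizeof(rcvbuf);
 *
 *	setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 *	getsockopt(s, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
 */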
1318 
1319 /*
1320  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1321  * This allows kern_getsockname() to return a pointer to an allocated struct
1322  * sockaddr which must be freed later with kfree().  The caller must
1323  * initialize *name to NULL.
1324  */
1325 int
1326 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1327 {
1328 	struct thread *td = curthread;
1329 	struct proc *p = td->td_proc;
1330 	struct file *fp;
1331 	struct socket *so;
1332 	struct sockaddr *sa = NULL;
1333 	int error;
1334 
1335 	error = holdsock(p->p_fd, s, &fp);
1336 	if (error)
1337 		return (error);
1338 	if (*namelen < 0) {
1339 		fdrop(fp);
1340 		return (EINVAL);
1341 	}
1342 	so = (struct socket *)fp->f_data;
1343 	error = so_pru_sockaddr(so, &sa);
1344 	if (error == 0) {
1345 		if (sa == NULL) {
1346 			*namelen = 0;
1347 		} else {
1348 			*namelen = MIN(*namelen, sa->sa_len);
1349 			*name = sa;
1350 		}
1351 	}
1352 
1353 	fdrop(fp);
1354 	return (error);
1355 }
1356 
1357 /*
1358  * getsockname_args(int fdes, caddr_t asa, int *alen)
1359  *
1360  * Get socket name.
1361  *
1362  * MPALMOSTSAFE
1363  */
1364 int
1365 sys_getsockname(struct getsockname_args *uap)
1366 {
1367 	struct sockaddr *sa = NULL;
1368 	int error, sa_len;
1369 
1370 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1371 	if (error)
1372 		return (error);
1373 
1374 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1375 
1376 	if (error == 0)
1377 		error = copyout(sa, uap->asa, sa_len);
1378 	if (error == 0)
1379 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1380 	if (sa)
1381 		kfree(sa, M_SONAME);
1382 	return (error);
1383 }
1384 
1385 /*
1386  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1387  * This allows kern_getpeername() to return a pointer to an allocated struct
1388  * sockaddr which must be freed later with kfree().  The caller must
1389  * initialize *name to NULL.
1390  */
1391 int
1392 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1393 {
1394 	struct thread *td = curthread;
1395 	struct proc *p = td->td_proc;
1396 	struct file *fp;
1397 	struct socket *so;
1398 	struct sockaddr *sa = NULL;
1399 	int error;
1400 
1401 	error = holdsock(p->p_fd, s, &fp);
1402 	if (error)
1403 		return (error);
1404 	if (*namelen < 0) {
1405 		fdrop(fp);
1406 		return (EINVAL);
1407 	}
1408 	so = (struct socket *)fp->f_data;
1409 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1410 		fdrop(fp);
1411 		return (ENOTCONN);
1412 	}
1413 	error = so_pru_peeraddr(so, &sa);
1414 	if (error == 0) {
1415 		if (sa == NULL) {
1416 			*namelen = 0;
1417 		} else {
1418 			*namelen = MIN(*namelen, sa->sa_len);
1419 			*name = sa;
1420 		}
1421 	}
1422 
1423 	fdrop(fp);
1424 	return (error);
1425 }
1426 
1427 /*
1428  * getpeername_args(int fdes, caddr_t asa, int *alen)
1429  *
1430  * Get name of peer for connected socket.
1431  *
1432  * MPALMOSTSAFE
1433  */
1434 int
1435 sys_getpeername(struct getpeername_args *uap)
1436 {
1437 	struct sockaddr *sa = NULL;
1438 	int error, sa_len;
1439 
1440 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1441 	if (error)
1442 		return (error);
1443 
1444 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1445 
1446 	if (error == 0)
1447 		error = copyout(sa, uap->asa, sa_len);
1448 	if (error == 0)
1449 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1450 	if (sa)
1451 		kfree(sa, M_SONAME);
1452 	return (error);
1453 }
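
/*
 * Illustrative userland use of the two name calls above (a minimal
 * sketch, not part of this file).  Both take a value/result length that
 * is clipped to sa_len on return, exactly as kern_getsockname() and
 * kern_getpeername() do.
 *
 *	struct sockaddr_storage local, peer;
 *	socklen_t llen = sizeof(local), plen = sizeof(peer);
 *
 *	getsockname(s, (struct sockaddr *)&local, &llen);
 *	getpeername(s, (struct sockaddr *)&peer, &plen);
 */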
1454 
1455 int
1456 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1457 {
1458 	struct sockaddr *sa;
1459 	int error;
1460 
1461 	*namp = NULL;
1462 	if (len > SOCK_MAXADDRLEN)
1463 		return ENAMETOOLONG;
1464 	if (len < offsetof(struct sockaddr, sa_data[0]))
1465 		return EDOM;
1466 	sa = kmalloc(len, M_SONAME, M_WAITOK);
1467 	error = copyin(uaddr, sa, len);
1468 	if (error) {
1469 		kfree(sa, M_SONAME);
1470 	} else {
1471 #if BYTE_ORDER != BIG_ENDIAN
1472 		/*
1473 		 * The bind(), connect(), and sendto() syscalls were not
1474 		 * versioned for COMPAT_43.  Thus, this check must stay.
1475 		 */
1476 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1477 			sa->sa_family = sa->sa_len;
1478 #endif
1479 		sa->sa_len = len;
1480 		*namp = sa;
1481 	}
1482 	return error;
1483 }
1484 
1485 /*
1486  * Detach a mapped page and release resources back to the system.
1487  * We must release our wiring and if the object is ripped out
1488  * from under the vm_page we become responsible for freeing the
1489  * page.
1490  *
1491  * MPSAFE
1492  */
1493 static void
1494 sf_buf_mfree(void *arg)
1495 {
1496 	struct sf_buf *sf = arg;
1497 	vm_page_t m;
1498 
1499 	m = sf_buf_page(sf);
1500 	if (sf_buf_free(sf)) {
1501 		/* sf invalid now */
1502 		/*
1503 		vm_page_busy_wait(m, FALSE, "sockpgf");
1504 		vm_page_wakeup(m);
1505 		*/
1506 		vm_page_unhold(m);
1507 #if 0
1508 		if (m->object == NULL &&
1509 		    m->wire_count == 0 &&
1510 		    (m->flags & PG_NEED_COMMIT) == 0) {
1511 			vm_page_free(m);
1512 		} else {
1513 			vm_page_wakeup(m);
1514 		}
1515 #endif
1516 	}
1517 }
1518 
1519 /*
1520  * sendfile(2).
1521  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1522  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1523  *
1524  * Send a file specified by 'fd' and starting at 'offset' to a socket
1525  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1526  * nbytes == 0. Optionally add a header and/or trailer to the socket
1527  * output. If specified, write the total number of bytes sent into *sbytes.
1528  *
1529  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1530  * the headers to count against the remaining bytes to be sent from
1531  * the file descriptor.  We may wish to implement a compatibility syscall
1532  * in the future.
1533  *
1534  * MPALMOSTSAFE
1535  */
1536 int
1537 sys_sendfile(struct sendfile_args *uap)
1538 {
1539 	struct thread *td = curthread;
1540 	struct proc *p = td->td_proc;
1541 	struct file *fp;
1542 	struct vnode *vp = NULL;
1543 	struct sf_hdtr hdtr;
1544 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1545 	struct uio auio;
1546 	struct mbuf *mheader = NULL;
1547 	size_t hbytes = 0;
1548 	size_t tbytes;
1549 	off_t hdtr_size = 0;
1550 	off_t sbytes = 0;
1551 	int error;
1552 
1553 	KKASSERT(p);
1554 
1555 	/*
1556 	 * Do argument checking. Must be a regular file in, stream
1557 	 * type and connected socket out, positive offset.
1558 	 */
1559 	fp = holdfp(p->p_fd, uap->fd, FREAD);
1560 	if (fp == NULL) {
1561 		return (EBADF);
1562 	}
1563 	if (fp->f_type != DTYPE_VNODE) {
1564 		fdrop(fp);
1565 		return (EINVAL);
1566 	}
1567 	vp = (struct vnode *)fp->f_data;
1568 	vref(vp);
1569 	fdrop(fp);
1570 
1571 	/*
1572 	 * If specified, get the pointer to the sf_hdtr struct for
1573 	 * any headers/trailers.
1574 	 */
1575 	if (uap->hdtr) {
1576 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1577 		if (error)
1578 			goto done;
1579 		/*
1580 		 * Send any headers.
1581 		 */
1582 		if (hdtr.headers) {
1583 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1584 					     hdtr.hdr_cnt, &hbytes);
1585 			if (error)
1586 				goto done;
1587 			auio.uio_iov = iov;
1588 			auio.uio_iovcnt = hdtr.hdr_cnt;
1589 			auio.uio_offset = 0;
1590 			auio.uio_segflg = UIO_USERSPACE;
1591 			auio.uio_rw = UIO_WRITE;
1592 			auio.uio_td = td;
1593 			auio.uio_resid = hbytes;
1594 
1595 			mheader = m_uiomove(&auio);
1596 
1597 			iovec_free(&iov, aiov);
1598 			if (mheader == NULL)
1599 				goto done;
1600 		}
1601 	}
1602 
1603 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1604 			      &sbytes, uap->flags);
1605 	if (error)
1606 		goto done;
1607 
1608 	/*
1609 	 * Send trailers. Wimp out and use writev(2).
1610 	 */
1611 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1612 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1613 				     hdtr.trl_cnt, &auio.uio_resid);
1614 		if (error)
1615 			goto done;
1616 		auio.uio_iov = iov;
1617 		auio.uio_iovcnt = hdtr.trl_cnt;
1618 		auio.uio_offset = 0;
1619 		auio.uio_segflg = UIO_USERSPACE;
1620 		auio.uio_rw = UIO_WRITE;
1621 		auio.uio_td = td;
1622 
1623 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1624 
1625 		iovec_free(&iov, aiov);
1626 		if (error)
1627 			goto done;
1628 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1629 	}
1630 
1631 done:
1632 	if (vp)
1633 		vrele(vp);
1634 	if (uap->sbytes != NULL) {
1635 		sbytes += hdtr_size;
1636 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1637 	}
1638 	return (error);
1639 }
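
/*
 * Illustrative userland sendfile() loop on a non-blocking socket (a
 * minimal sketch, not part of this file; fd is a regular file and s a
 * connected stream socket).  On EAGAIN the kernel still reports the
 * bytes it managed to queue through *sbytes, so the caller advances the
 * offset and retries once the socket is writable again.
 *
 *	off_t off = 0, sent = 0;
 *
 *	while (sendfile(fd, s, off, 0, NULL, &sent, 0) < 0) {
 *		struct pollfd pfd = { .fd = s, .events = POLLOUT };
 *
 *		if (errno != EAGAIN)
 *			break;
 *		off += sent;
 *		poll(&pfd, 1, -1);
 *	}
 */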
1640 
1641 int
1642 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1643 	      struct mbuf *mheader, off_t *sbytes, int flags)
1644 {
1645 	struct thread *td = curthread;
1646 	struct proc *p = td->td_proc;
1647 	struct vm_object *obj;
1648 	struct socket *so;
1649 	struct file *fp;
1650 	struct mbuf *m, *mp;
1651 	struct sf_buf *sf;
1652 	struct vm_page *pg;
1653 	off_t off, xfsize, xbytes;
1654 	off_t hbytes = 0;
1655 	int error = 0;
1656 
1657 	if (vp->v_type != VREG) {
1658 		error = EINVAL;
1659 		goto done0;
1660 	}
1661 	if ((obj = vp->v_object) == NULL) {
1662 		error = EINVAL;
1663 		goto done0;
1664 	}
1665 	error = holdsock(p->p_fd, sfd, &fp);
1666 	if (error)
1667 		goto done0;
1668 	so = (struct socket *)fp->f_data;
1669 	if (so->so_type != SOCK_STREAM) {
1670 		error = EINVAL;
1671 		goto done;
1672 	}
1673 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1674 		error = ENOTCONN;
1675 		goto done;
1676 	}
1677 	if (offset < 0) {
1678 		error = EINVAL;
1679 		goto done;
1680 	}
1681 
1682 	/*
1683 	 * preallocation is required for asynchronous passing of mbufs,
1684 	 * otherwise we can wind up building up an infinite number of
1685 	 * mbufs during the asynchronous latency.
1686 	 */
1687 	if ((so->so_snd.ssb_flags & (SSB_PREALLOC | SSB_STOPSUPP)) == 0) {
1688 		error = EINVAL;
1689 		goto done;
1690 	}
1691 
1692 	*sbytes = 0;
1693 	xbytes = 0;
1694 	/*
1695 	 * Protect against multiple writers to the socket.
1696 	 */
1697 	ssb_lock(&so->so_snd, M_WAITOK);
1698 
1699 	/*
1700 	 * Loop through the pages in the file, starting with the requested
1701 	 * offset. Get a file page (do I/O if necessary), map the file page
1702 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1703 	 * it on the socket.
1704 	 */
1705 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes, xbytes += xfsize) {
1706 		vm_pindex_t pindex;
1707 		vm_offset_t pgoff;
1708 		long space;
1709 
1710 		pindex = OFF_TO_IDX(off);
1711 retry_lookup:
1712 		/*
1713 		 * Calculate the amount to transfer. Not to exceed a page,
1714 		 * the EOF, or the passed in nbytes.
1715 		 */
1716 		xfsize = vp->v_filesize - off;
1717 		if (xfsize > PAGE_SIZE)
1718 			xfsize = PAGE_SIZE;
1719 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1720 		if (PAGE_SIZE - pgoff < xfsize)
1721 			xfsize = PAGE_SIZE - pgoff;
1722 		if (nbytes && xfsize > (nbytes - xbytes))
1723 			xfsize = nbytes - xbytes;
1724 		if (xfsize <= 0)
1725 			break;
1726 		/*
1727 		 * Optimize the non-blocking case by looking at the socket space
1728 		 * before going to the extra work of constituting the sf_buf.
1729 		 */
1730 		if (so->so_snd.ssb_flags & SSB_PREALLOC)
1731 			space = ssb_space_prealloc(&so->so_snd);
1732 		else
1733 			space = ssb_space(&so->so_snd);
1734 
1735 		if ((fp->f_flag & FNONBLOCK) && space <= 0) {
1736 			if (so->so_state & SS_CANTSENDMORE)
1737 				error = EPIPE;
1738 			else
1739 				error = EAGAIN;
1740 			ssb_unlock(&so->so_snd);
1741 			goto done;
1742 		}
1743 		/*
1744 		 * Attempt to look up the page.
1745 		 *
1746 		 * Allocate if not found, wait and loop if busy, then hold the page.
1747 		 * We hold rather than wire the page because we do not want to prevent
1748 		 * filesystem truncation operations from occurring on the file.  This
1749 		 * can happen even under normal operation if the file being sent is
1750 		 * remove()d after the sendfile() call completes, because the socket buffer
1751 		 * may still be draining.  tmpfs will crash if we try to use wire.
1752 		 */
1753 		vm_object_hold(obj);
1754 		pg = vm_page_lookup_busy_try(obj, pindex, TRUE, &error);
1755 		if (error) {
1756 			vm_page_sleep_busy(pg, TRUE, "sfpbsy");
1757 			vm_object_drop(obj);
1758 			goto retry_lookup;
1759 		}
1760 		if (pg == NULL) {
1761 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
1762 							VM_ALLOC_NULL_OK);
1763 			if (pg == NULL) {
1764 				vm_wait(0);
1765 				vm_object_drop(obj);
1766 				goto retry_lookup;
1767 			}
1768 		}
1769 		vm_page_hold(pg);
1770 		vm_object_drop(obj);
1771 
1772 		/*
1773 		 * If page is not valid for what we need, initiate I/O
1774 		 */
1775 
1776 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1777 			struct uio auio;
1778 			struct iovec aiov;
1779 			int bsize;
1780 
1781 			/*
1782 			 * Ensure that our page is still around when the I/O
1783 			 * completes.
1784 			 *
1785 			 * Ensure that our page is not modified while part of
1786 			 * a mbuf as this could mess up tcp checksums, DMA,
1787 			 * etc (XXX NEEDS WORK).  The softbusy is supposed to
1788 			 * help here but it actually doesn't.
1789 			 *
1790 			 * XXX THIS HAS MULTIPLE PROBLEMS.  The underlying
1791 			 *     VM pages are not protected by the soft-busy
1792 			 *     unless we vm_page_protect... READ them, and
1793 			 *     they STILL aren't protected against
1794 			 *     modification via the buffer cache (VOP_WRITE).
1795 			 *
1796 			 *     Fixing the second issue is particularly
1797 			 *     difficult.
1798 			 *
1799 			 * XXX We also can't soft-busy anyway because it can
1800 			 *     deadlock against the syncer doing a vfs_msync(),
1801 			 *     vfs_msync->vmntvnodesca->vfs_msync_scan2->
1802 			 *     vm_object_page_clean->(scan)-> ... page
1803 			 *     busy-wait.
1804 			 */
1805 			/*vm_page_io_start(pg);*/
1806 			vm_page_wakeup(pg);
1807 
1808 			/*
1809 			 * Get the page from backing store.
1810 			 */
1811 			bsize = vp->v_mount->mnt_stat.f_iosize;
1812 			auio.uio_iov = &aiov;
1813 			auio.uio_iovcnt = 1;
1814 			aiov.iov_base = 0;
1815 			aiov.iov_len = MAXBSIZE;
1816 			auio.uio_resid = MAXBSIZE;
1817 			auio.uio_offset = trunc_page(off);
1818 			auio.uio_segflg = UIO_NOCOPY;
1819 			auio.uio_rw = UIO_READ;
1820 			auio.uio_td = td;
1821 			vn_lock(vp, LK_SHARED | LK_RETRY);
1822 			error = VOP_READ(vp, &auio,
1823 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1824 				    td->td_ucred);
1825 			vn_unlock(vp);
1826 			vm_page_flag_clear(pg, PG_ZERO);
1827 			vm_page_busy_wait(pg, FALSE, "sockpg");
1828 			/*vm_page_io_finish(pg);*/
1829 			if (error) {
1830 				vm_page_wakeup(pg);
1831 				vm_page_unhold(pg);
1832 				/* vm_page_try_to_free(pg); */
1833 				ssb_unlock(&so->so_snd);
1834 				goto done;
1835 			}
1836 		}
1837 
1838 
1839 		/*
1840 		 * Get a sendfile buf. We usually wait as long as necessary,
1841 		 * but this wait can be interrupted.
1842 		 */
1843 		if ((sf = sf_buf_alloc(pg)) == NULL) {
1844 			vm_page_wakeup(pg);
1845 			vm_page_unhold(pg);
1846 			/* vm_page_try_to_free(pg); */
1847 			ssb_unlock(&so->so_snd);
1848 			error = EINTR;
1849 			goto done;
1850 		}
1851 
1852 		/*
1853 		 * Get an mbuf header and set it up as having external storage.
1854 		 */
1855 		MGETHDR(m, M_WAITOK, MT_DATA);
1856 		if (m == NULL) {
1857 			error = ENOBUFS;
1858 			vm_page_wakeup(pg);
1859 			vm_page_unhold(pg);
1860 			/* vm_page_try_to_free(pg); */
1861 			sf_buf_free(sf);
1862 			ssb_unlock(&so->so_snd);
1863 			goto done;
1864 		}
1865 
1866 		vm_page_wakeup(pg);
1867 
1868 		m->m_ext.ext_free = sf_buf_mfree;
1869 		m->m_ext.ext_ref = sf_buf_ref;
1870 		m->m_ext.ext_arg = sf;
1871 		m->m_ext.ext_buf = (void *)sf_buf_kva(sf);
1872 		m->m_ext.ext_size = PAGE_SIZE;
1873 		m->m_data = (char *)sf_buf_kva(sf) + pgoff;
1874 		m->m_flags |= M_EXT;
1875 		m->m_pkthdr.len = m->m_len = xfsize;
1876 		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1877 
1878 		if (mheader != NULL) {
1879 			hbytes = mheader->m_pkthdr.len;
1880 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1881 			m_cat(mheader, m);
1882 			m = mheader;
1883 			mheader = NULL;
1884 		} else
1885 			hbytes = 0;
1886 
1887 		/*
1888 		 * Add the buffer to the socket buffer chain.
1889 		 */
1890 		crit_enter();
1891 retry_space:
1892 		/*
1893 		 * Make sure that the socket is still able to take more data.
1894 		 * CANTSENDMORE being true usually means that the connection
1895 		 * was closed. so_error is true when an error was sensed after
1896 		 * a previous send.
1897 		 * The state is checked after the page mapping and buffer
1898 		 * allocation above since those operations may block and make
1899 		 * any socket checks stale. From this point forward, nothing
1900 		 * blocks before the pru_send (or more accurately, any blocking
1901 		 * results in a loop back to here to re-check).
1902 		 */
1903 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1904 			if (so->so_state & SS_CANTSENDMORE) {
1905 				error = EPIPE;
1906 			} else {
1907 				error = so->so_error;
1908 				so->so_error = 0;
1909 			}
1910 			m_freem(m);
1911 			ssb_unlock(&so->so_snd);
1912 			crit_exit();
1913 			goto done;
1914 		}
1915 		/*
1916 		 * Wait for socket space to become available. We do this just
1917 		 * after checking the connection state above in order to avoid
1918 		 * a race condition with ssb_wait().
1919 		 */
1920 		if (so->so_snd.ssb_flags & SSB_PREALLOC)
1921 			space = ssb_space_prealloc(&so->so_snd);
1922 		else
1923 			space = ssb_space(&so->so_snd);
1924 
1925 		if (space < m->m_pkthdr.len && space < so->so_snd.ssb_lowat) {
1926 			if (fp->f_flag & FNONBLOCK) {
1927 				m_freem(m);
1928 				ssb_unlock(&so->so_snd);
1929 				crit_exit();
1930 				error = EAGAIN;
1931 				goto done;
1932 			}
1933 			error = ssb_wait(&so->so_snd);
1934 			/*
1935 			 * An error from ssb_wait usually indicates that we've
1936 			 * been interrupted by a signal. If we've sent anything
1937 			 * then return bytes sent, otherwise return the error.
1938 			 */
1939 			if (error) {
1940 				m_freem(m);
1941 				ssb_unlock(&so->so_snd);
1942 				crit_exit();
1943 				goto done;
1944 			}
1945 			goto retry_space;
1946 		}
1947 
1948 		if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1949 			for (mp = m; mp != NULL; mp = mp->m_next)
1950 				ssb_preallocstream(&so->so_snd, mp);
1951 		}
1952 		if (use_sendfile_async)
1953 			error = so_pru_senda(so, 0, m, NULL, NULL, td);
1954 		else
1955 			error = so_pru_send(so, 0, m, NULL, NULL, td);
1956 
1957 		crit_exit();
1958 		if (error) {
1959 			ssb_unlock(&so->so_snd);
1960 			goto done;
1961 		}
1962 	}
1963 	if (mheader != NULL) {
1964 		*sbytes += mheader->m_pkthdr.len;
1965 
1966 		if (so->so_snd.ssb_flags & SSB_PREALLOC) {
1967 			for (mp = mheader; mp != NULL; mp = mp->m_next)
1968 				ssb_preallocstream(&so->so_snd, mp);
1969 		}
1970 		if (use_sendfile_async)
1971 			error = so_pru_senda(so, 0, mheader, NULL, NULL, td);
1972 		else
1973 			error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1974 
1975 		mheader = NULL;
1976 	}
1977 	ssb_unlock(&so->so_snd);
1978 
1979 done:
1980 	fdrop(fp);
1981 done0:
1982 	if (mheader != NULL)
1983 		m_freem(mheader);
1984 	return (error);
1985 }
1986