xref: /dragonfly/sys/kern/uipc_syscalls.c (revision 3f5e28f4)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * sendfile(2) and related extensions:
6  * Copyright (c) 1998, David Greenman. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
37  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
38  * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.80 2007/04/22 01:13:10 dillon Exp $
39  */
40 
41 #include "opt_ktrace.h"
42 #include "opt_sctp.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/sysproto.h>
48 #include <sys/malloc.h>
49 #include <sys/filedesc.h>
50 #include <sys/event.h>
51 #include <sys/proc.h>
52 #include <sys/fcntl.h>
53 #include <sys/file.h>
54 #include <sys/filio.h>
55 #include <sys/kern_syscall.h>
56 #include <sys/mbuf.h>
57 #include <sys/protosw.h>
58 #include <sys/sfbuf.h>
59 #include <sys/socket.h>
60 #include <sys/socketvar.h>
61 #include <sys/socketops.h>
62 #include <sys/uio.h>
63 #include <sys/vnode.h>
64 #include <sys/lock.h>
65 #include <sys/mount.h>
66 #ifdef KTRACE
67 #include <sys/ktrace.h>
68 #endif
69 #include <vm/vm.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vm_extern.h>
75 #include <sys/file2.h>
76 #include <sys/signalvar.h>
77 #include <sys/serialize.h>
78 
79 #include <sys/thread2.h>
80 #include <sys/msgport2.h>
81 
82 #ifdef SCTP
83 #include <netinet/sctp_peeloff.h>
84 #endif /* SCTP */
85 
86 struct sfbuf_mref {
87 	struct sf_buf	*sf;
88 	int		mref_count;
89 	struct lwkt_serialize serializer;
90 };
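/*
 * Editor's note: one sfbuf_mref is allocated per sendfile(2) data mbuf (see
 * kern_sendfile() below).  'sf' maps the wired file page, 'mref_count'
 * counts the mbuf external-storage references taken via sf_buf_mref(), and
 * 'serializer' guards the race to zero in sf_buf_mfree().
 */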
91 
92 static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile sfbuf ref structures");
93 
94 /*
95  * System call interface to the socket abstraction.
96  */
97 
98 extern	struct fileops socketops;
99 
100 /*
101  * socket_args(int domain, int type, int protocol)
102  */
103 int
104 kern_socket(int domain, int type, int protocol, int *res)
105 {
106 	struct thread *td = curthread;
107 	struct proc *p = td->td_proc;
108 	struct socket *so;
109 	struct file *fp;
110 	int fd, error;
111 
112 	KKASSERT(p);
113 
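	/*
	 * Editor's note: falloc() only reserves a descriptor slot; no file
	 * pointer is installed yet.  On failure the reservation is released
	 * with fsetfd(p, NULL, fd); on success the fully initialized fp is
	 * installed with fsetfd(p, fp, fd).  The same reserve-then-install
	 * pattern is used throughout this file.
	 */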
114 	error = falloc(p, &fp, &fd);
115 	if (error)
116 		return (error);
117 	error = socreate(domain, &so, type, protocol, td);
118 	if (error) {
119 		fsetfd(p, NULL, fd);
120 	} else {
121 		fp->f_type = DTYPE_SOCKET;
122 		fp->f_flag = FREAD | FWRITE;
123 		fp->f_ops = &socketops;
124 		fp->f_data = so;
125 		*res = fd;
126 		fsetfd(p, fp, fd);
127 	}
128 	fdrop(fp);
129 	return (error);
130 }
131 
132 int
133 sys_socket(struct socket_args *uap)
134 {
135 	int error;
136 
137 	error = kern_socket(uap->domain, uap->type, uap->protocol,
138 	    &uap->sysmsg_result);
139 
140 	return (error);
141 }
142 
143 int
144 kern_bind(int s, struct sockaddr *sa)
145 {
146 	struct thread *td = curthread;
147 	struct proc *p = td->td_proc;
148 	struct file *fp;
149 	int error;
150 
151 	KKASSERT(p);
152 	error = holdsock(p->p_fd, s, &fp);
153 	if (error)
154 		return (error);
155 	error = sobind((struct socket *)fp->f_data, sa, td);
156 	fdrop(fp);
157 	return (error);
158 }
159 
160 /*
161  * bind_args(int s, caddr_t name, int namelen)
162  */
163 int
164 sys_bind(struct bind_args *uap)
165 {
166 	struct sockaddr *sa;
167 	int error;
168 
169 	error = getsockaddr(&sa, uap->name, uap->namelen);
170 	if (error)
171 		return (error);
172 	error = kern_bind(uap->s, sa);
173 	FREE(sa, M_SONAME);
174 
175 	return (error);
176 }
177 
178 int
179 kern_listen(int s, int backlog)
180 {
181 	struct thread *td = curthread;
182 	struct proc *p = td->td_proc;
183 	struct file *fp;
184 	int error;
185 
186 	KKASSERT(p);
187 	error = holdsock(p->p_fd, s, &fp);
188 	if (error)
189 		return (error);
190 	error = solisten((struct socket *)fp->f_data, backlog, td);
191 	fdrop(fp);
192 	return(error);
193 }
194 
195 /*
196  * listen_args(int s, int backlog)
197  */
198 int
199 sys_listen(struct listen_args *uap)
200 {
201 	int error;
202 
203 	error = kern_listen(uap->s, uap->backlog);
204 	return (error);
205 }
206 
207 /*
208  * Returns TRUE if satisfied; the accepted socket is returned in nm_so.
209  */
210 static boolean_t
211 soaccept_predicate(struct netmsg *msg0)
212 {
213 	struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
214 	struct socket *head = msg->nm_so;
215 
216 	if (head->so_error != 0) {
217 		msg->nm_lmsg.ms_error = head->so_error;
218 		return (TRUE);
219 	}
220 	if (!TAILQ_EMPTY(&head->so_comp)) {
221 		/* Abuse nm_so field as copy in/copy out parameter. XXX JH */
222 		msg->nm_so = TAILQ_FIRST(&head->so_comp);
223 		TAILQ_REMOVE(&head->so_comp, msg->nm_so, so_list);
224 		head->so_qlen--;
225 
226 		msg->nm_lmsg.ms_error = 0;
227 		return (TRUE);
228 	}
229 	if (head->so_state & SS_CANTRCVMORE) {
230 		msg->nm_lmsg.ms_error = ECONNABORTED;
231 		return (TRUE);
232 	}
233 	if (msg->nm_fflags & FNONBLOCK) {
234 		msg->nm_lmsg.ms_error = EWOULDBLOCK;
235 		return (TRUE);
236 	}
237 
238 	return (FALSE);
239 }
240 
241 /*
242  * The second argument to kern_accept() is a handle to a struct sockaddr.
243  * This allows kern_accept() to return a pointer to an allocated struct
244  * sockaddr which must be freed later with FREE().  The caller must
245  * initialize *name to NULL.
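 *
 * Illustrative calling pattern (editor's sketch; 'uname' is a hypothetical
 * user pointer, and sys_accept() below is the real in-tree user):
 *
 *	struct sockaddr *sa = NULL;
 *	int sa_len = sizeof(struct sockaddr_storage);	(caller's buffer size)
 *	int res;
 *
 *	error = kern_accept(s, 0, &sa, &sa_len, &res);
 *	if (error == 0 && sa != NULL)
 *		error = copyout(sa, uname, sa_len);
 *	if (sa)
 *		FREE(sa, M_SONAME);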
246  */
247 int
248 kern_accept(int s, int fflags, struct sockaddr **name, int *namelen, int *res)
249 {
250 	struct thread *td = curthread;
251 	struct proc *p = td->td_proc;
252 	struct file *lfp = NULL;
253 	struct file *nfp = NULL;
254 	struct sockaddr *sa;
255 	struct socket *head, *so;
256 	struct netmsg_so_notify msg;
257 	lwkt_port_t port;
258 	int fd;
259 	u_int fflag;		/* type must match fp->f_flag */
260 	int error, tmp;
261 
262 	*res = -1;
263 	if (name && namelen && *namelen < 0)
264 		return (EINVAL);
265 
266 	error = holdsock(p->p_fd, s, &lfp);
267 	if (error)
268 		return (error);
269 
270 	error = falloc(p, &nfp, &fd);
271 	if (error) {		/* Probably ran out of file descriptors. */
272 		fdrop(lfp);
273 		return (error);
274 	}
275 	head = (struct socket *)lfp->f_data;
276 	if ((head->so_options & SO_ACCEPTCONN) == 0) {
277 		error = EINVAL;
278 		goto done;
279 	}
280 
281 	if (fflags & O_FBLOCKING)
282 		fflags |= lfp->f_flag & ~FNONBLOCK;
283 	else if (fflags & O_FNONBLOCKING)
284 		fflags |= lfp->f_flag | FNONBLOCK;
285 	else
286 		fflags = lfp->f_flag;
287 
288 	/* optimize for uniprocessor case later XXX JH */
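	/*
	 * Editor's note: the accept is implemented as a predicate message.
	 * lwkt_domsg() below blocks this thread; the socket's protocol
	 * thread (re)evaluates soaccept_predicate() as events arrive on the
	 * listen socket and replies the message once the predicate returns
	 * TRUE, i.e. a completed connection, a pending error, or
	 * EWOULDBLOCK for non-blocking sockets.
	 */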
289 	port = head->so_proto->pr_mport(head, NULL, PRU_PRED);
290 	lwkt_initmsg(&msg.nm_lmsg, &curthread->td_msgport,
291 		     MSGF_PCATCH | MSGF_ABORTABLE,
292 		     lwkt_cmd_func(netmsg_so_notify),
293 		     lwkt_cmd_func(netmsg_so_notify_abort));
294 	msg.nm_predicate = soaccept_predicate;
295 	msg.nm_fflags = fflags;
296 	msg.nm_so = head;
297 	msg.nm_etype = NM_REVENT;
298 	error = lwkt_domsg(port, &msg.nm_lmsg);
299 	if (error)
300 		goto done;
301 
302 	/*
303 	 * At this point we have the connection that's ready to be accepted.
304 	 */
305 	so = msg.nm_so;
306 
307 	fflag = lfp->f_flag;
308 
309 	/* connection has been removed from the listen queue */
310 	KNOTE(&head->so_rcv.ssb_sel.si_note, 0);
311 
312 	so->so_state &= ~SS_COMP;
313 	so->so_head = NULL;
314 	if (head->so_sigio != NULL)
315 		fsetown(fgetown(head->so_sigio), &so->so_sigio);
316 
317 	nfp->f_type = DTYPE_SOCKET;
318 	nfp->f_flag = fflag;
319 	nfp->f_ops = &socketops;
320 	nfp->f_data = so;
321 	/* Sync socket nonblocking/async state with file flags */
322 	tmp = fflag & FNONBLOCK;
323 	(void) fo_ioctl(nfp, FIONBIO, (caddr_t)&tmp, p->p_ucred);
324 	tmp = fflag & FASYNC;
325 	(void) fo_ioctl(nfp, FIOASYNC, (caddr_t)&tmp, p->p_ucred);
326 
327 	sa = NULL;
328 	error = soaccept(so, &sa);
329 
330 	/*
331 	 * Set the returned name and namelen as applicable.  Set the returned
332 	 * namelen to 0 for older code which might ignore the return value
333 	 * from accept.
334 	 */
335 	if (error == 0) {
336 		if (sa && name && namelen) {
337 			if (*namelen > sa->sa_len)
338 				*namelen = sa->sa_len;
339 			*name = sa;
340 		} else {
341 			if (sa)
342 				FREE(sa, M_SONAME);
343 		}
344 	}
345 
346 done:
347 	/*
348 	 * If an error occurred, clear the reserved descriptor; otherwise
349 	 * associate nfp with it.
350 	 *
351 	 * Note that *res is normally ignored if an error is returned but
352 	 * a syscall message will still have access to the result code.
353 	 */
354 	if (error) {
355 		fsetfd(p, NULL, fd);
356 	} else {
357 		*res = fd;
358 		fsetfd(p, nfp, fd);
359 	}
360 	fdrop(nfp);
361 	fdrop(lfp);
362 	return (error);
363 }
364 
365 /*
366  * accept(int s, caddr_t name, int *anamelen)
367  */
368 int
369 sys_accept(struct accept_args *uap)
370 {
371 	struct sockaddr *sa = NULL;
372 	int sa_len;
373 	int error;
374 
375 	if (uap->name) {
376 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
377 		if (error)
378 			return (error);
379 
380 		error = kern_accept(uap->s, 0, &sa, &sa_len, &uap->sysmsg_result);
381 
382 		if (error == 0)
383 			error = copyout(sa, uap->name, sa_len);
384 		if (error == 0) {
385 			error = copyout(&sa_len, uap->anamelen,
386 			    sizeof(*uap->anamelen));
387 		}
388 		if (sa)
389 			FREE(sa, M_SONAME);
390 	} else {
391 		error = kern_accept(uap->s, 0, NULL, 0, &uap->sysmsg_result);
392 	}
393 	return (error);
394 }
395 
396 /*
397  * extaccept(int s, int fflags, caddr_t name, int *anamelen)
398  */
399 int
400 sys_extaccept(struct extaccept_args *uap)
401 {
402 	struct sockaddr *sa = NULL;
403 	int sa_len;
404 	int error;
405 	int fflags = uap->flags & O_FMASK;
406 
407 	if (uap->name) {
408 		error = copyin(uap->anamelen, &sa_len, sizeof(sa_len));
409 		if (error)
410 			return (error);
411 
412 		error = kern_accept(uap->s, fflags, &sa, &sa_len, &uap->sysmsg_result);
413 
414 		if (error == 0)
415 			error = copyout(sa, uap->name, sa_len);
416 		if (error == 0) {
417 			error = copyout(&sa_len, uap->anamelen,
418 			    sizeof(*uap->anamelen));
419 		}
420 		if (sa)
421 			FREE(sa, M_SONAME);
422 	} else {
423 		error = kern_accept(uap->s, fflags, NULL, 0, &uap->sysmsg_result);
424 	}
425 	return (error);
426 }
427 
428 
429 /*
430  * Returns TRUE if predicate satisfied.
431  */
432 static boolean_t
433 soconnected_predicate(struct netmsg *msg0)
434 {
435 	struct netmsg_so_notify *msg = (struct netmsg_so_notify *)msg0;
436 	struct socket *so = msg->nm_so;
437 
438 	/* check predicate */
439 	if (!(so->so_state & SS_ISCONNECTING) || so->so_error != 0) {
440 		msg->nm_lmsg.ms_error = so->so_error;
441 		return (TRUE);
442 	}
443 
444 	return (FALSE);
445 }
446 
447 int
448 kern_connect(int s, int fflags, struct sockaddr *sa)
449 {
450 	struct thread *td = curthread;
451 	struct proc *p = td->td_proc;
452 	struct file *fp;
453 	struct socket *so;
454 	int error;
455 
456 	error = holdsock(p->p_fd, s, &fp);
457 	if (error)
458 		return (error);
459 	so = (struct socket *)fp->f_data;
460 
461 	if (fflags & O_FBLOCKING)
462 		/* fflags &= ~FNONBLOCK; */;
463 	else if (fflags & O_FNONBLOCKING)
464 		fflags |= FNONBLOCK;
465 	else
466 		fflags = fp->f_flag;
467 
468 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
469 		error = EALREADY;
470 		goto done;
471 	}
472 	error = soconnect(so, sa, td);
473 	if (error)
474 		goto bad;
475 	if ((fflags & FNONBLOCK) && (so->so_state & SS_ISCONNECTING)) {
476 		error = EINPROGRESS;
477 		goto done;
478 	}
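	/*
	 * Editor's note: for a blocking connect still in progress we wait
	 * here via a predicate message; soconnected_predicate() is
	 * re-evaluated by the protocol thread until the socket leaves
	 * SS_ISCONNECTING or posts an error, at which point lwkt_domsg()
	 * returns.
	 */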
479 	if ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
480 		struct netmsg_so_notify msg;
481 		lwkt_port_t port;
482 
483 		port = so->so_proto->pr_mport(so, sa, PRU_PRED);
484 		lwkt_initmsg(&msg.nm_lmsg,
485 			    &curthread->td_msgport,
486 			    MSGF_PCATCH | MSGF_ABORTABLE,
487 			    lwkt_cmd_func(netmsg_so_notify),
488 			    lwkt_cmd_func(netmsg_so_notify_abort));
489 		msg.nm_predicate = soconnected_predicate;
490 		msg.nm_so = so;
491 		msg.nm_etype = NM_REVENT;
492 		error = lwkt_domsg(port, &msg.nm_lmsg);
493 	}
494 	if (error == 0) {
495 		error = so->so_error;
496 		so->so_error = 0;
497 	}
498 bad:
499 	so->so_state &= ~SS_ISCONNECTING;
500 	if (error == ERESTART)
501 		error = EINTR;
502 done:
503 	fdrop(fp);
504 	return (error);
505 }
506 
507 /*
508  * connect_args(int s, caddr_t name, int namelen)
509  */
510 int
511 sys_connect(struct connect_args *uap)
512 {
513 	struct sockaddr *sa;
514 	int error;
515 
516 	error = getsockaddr(&sa, uap->name, uap->namelen);
517 	if (error)
518 		return (error);
519 	error = kern_connect(uap->s, 0, sa);
520 	FREE(sa, M_SONAME);
521 
522 	return (error);
523 }
524 
525 /*
526  * extconnect_args(int s, int fflags, caddr_t name, int namelen)
527  */
528 int
529 sys_extconnect(struct extconnect_args *uap)
530 {
531 	struct sockaddr *sa;
532 	int error;
533 	int fflags = uap->flags & O_FMASK;
534 
535 	error = getsockaddr(&sa, uap->name, uap->namelen);
536 	if (error)
537 		return (error);
538 	error = kern_connect(uap->s, fflags, sa);
539 	FREE(sa, M_SONAME);
540 
541 	return (error);
542 }
543 
544 int
545 kern_socketpair(int domain, int type, int protocol, int *sv)
546 {
547 	struct thread *td = curthread;
548 	struct proc *p = td->td_proc;
549 	struct file *fp1, *fp2;
550 	struct socket *so1, *so2;
551 	int fd1, fd2, error;
552 
553 	KKASSERT(p);
554 	error = socreate(domain, &so1, type, protocol, td);
555 	if (error)
556 		return (error);
557 	error = socreate(domain, &so2, type, protocol, td);
558 	if (error)
559 		goto free1;
560 	error = falloc(p, &fp1, &fd1);
561 	if (error)
562 		goto free2;
563 	sv[0] = fd1;
564 	fp1->f_data = so1;
565 	error = falloc(p, &fp2, &fd2);
566 	if (error)
567 		goto free3;
568 	fp2->f_data = so2;
569 	sv[1] = fd2;
570 	error = soconnect2(so1, so2);
571 	if (error)
572 		goto free4;
573 	if (type == SOCK_DGRAM) {
574 		/*
575 		 * Datagram socket connection is asymmetric.
576 		 */
577 		 error = soconnect2(so2, so1);
578 		 if (error)
579 			goto free4;
580 	}
581 	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
582 	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
583 	fp1->f_ops = fp2->f_ops = &socketops;
584 	fsetfd(p, fp1, fd1);
585 	fsetfd(p, fp2, fd2);
586 	fdrop(fp1);
587 	fdrop(fp2);
588 	return (error);
589 free4:
590 	fsetfd(p, NULL, fd2);
591 	fdrop(fp2);
592 free3:
593 	fsetfd(p, NULL, fd1);
594 	fdrop(fp1);
595 free2:
596 	(void)soclose(so2, 0);
597 free1:
598 	(void)soclose(so1, 0);
599 	return (error);
600 }
601 
602 /*
603  * socketpair(int domain, int type, int protocol, int *rsv)
604  */
605 int
606 sys_socketpair(struct socketpair_args *uap)
607 {
608 	int error, sockv[2];
609 
610 	error = kern_socketpair(uap->domain, uap->type, uap->protocol, sockv);
611 
612 	if (error == 0)
613 		error = copyout(sockv, uap->rsv, sizeof(sockv));
614 	return (error);
615 }
616 
617 int
618 kern_sendmsg(int s, struct sockaddr *sa, struct uio *auio,
619     struct mbuf *control, int flags, int *res)
620 {
621 	struct thread *td = curthread;
622 	struct lwp *lp = td->td_lwp;
623 	struct proc *p = td->td_proc;
624 	struct file *fp;
625 	int len, error;
626 	struct socket *so;
627 #ifdef KTRACE
628 	struct iovec *ktriov = NULL;
629 	struct uio ktruio;
630 #endif
631 
632 	error = holdsock(p->p_fd, s, &fp);
633 	if (error)
634 		return (error);
635 	if (auio->uio_resid < 0) {
636 		error = EINVAL;
637 		goto done;
638 	}
639 #ifdef KTRACE
640 	if (KTRPOINT(td, KTR_GENIO)) {
641 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
642 
643 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
644 		bcopy((caddr_t)auio->uio_iov, (caddr_t)ktriov, iovlen);
645 		ktruio = *auio;
646 	}
647 #endif
648 	len = auio->uio_resid;
649 	so = (struct socket *)fp->f_data;
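	/*
	 * Editor's note: unless the caller forced blocking or non-blocking
	 * behavior with MSG_FBLOCKING/MSG_FNONBLOCKING, inherit it from the
	 * descriptor's FNONBLOCK flag (kern_recvmsg() does the same).
	 */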
650 	if ((flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
651 		if (fp->f_flag & FNONBLOCK)
652 			flags |= MSG_FNONBLOCKING;
653 	}
654 	error = so_pru_sosend(so, sa, auio, NULL, control, flags, td);
655 	if (error) {
656 		if (auio->uio_resid != len && (error == ERESTART ||
657 		    error == EINTR || error == EWOULDBLOCK))
658 			error = 0;
659 		if (error == EPIPE)
660 			lwpsignal(p, lp, SIGPIPE);
661 	}
662 #ifdef KTRACE
663 	if (ktriov != NULL) {
664 		if (error == 0) {
665 			ktruio.uio_iov = ktriov;
666 			ktruio.uio_resid = len - auio->uio_resid;
667 			ktrgenio(p, s, UIO_WRITE, &ktruio, error);
668 		}
669 		FREE(ktriov, M_TEMP);
670 	}
671 #endif
672 	if (error == 0)
673 		*res  = len - auio->uio_resid;
674 done:
675 	fdrop(fp);
676 	return (error);
677 }
678 
679 /*
680  * sendto_args(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen)
681  */
682 int
683 sys_sendto(struct sendto_args *uap)
684 {
685 	struct thread *td = curthread;
686 	struct uio auio;
687 	struct iovec aiov;
688 	struct sockaddr *sa = NULL;
689 	int error;
690 
691 	if (uap->to) {
692 		error = getsockaddr(&sa, uap->to, uap->tolen);
693 		if (error)
694 			return (error);
695 	}
696 	aiov.iov_base = uap->buf;
697 	aiov.iov_len = uap->len;
698 	auio.uio_iov = &aiov;
699 	auio.uio_iovcnt = 1;
700 	auio.uio_offset = 0;
701 	auio.uio_resid = uap->len;
702 	auio.uio_segflg = UIO_USERSPACE;
703 	auio.uio_rw = UIO_WRITE;
704 	auio.uio_td = td;
705 
706 	error = kern_sendmsg(uap->s, sa, &auio, NULL, uap->flags,
707 	    &uap->sysmsg_result);
708 
709 	if (sa)
710 		FREE(sa, M_SONAME);
711 	return (error);
712 }
713 
714 /*
715  * sendmsg_args(int s, caddr_t msg, int flags)
716  */
717 int
718 sys_sendmsg(struct sendmsg_args *uap)
719 {
720 	struct thread *td = curthread;
721 	struct msghdr msg;
722 	struct uio auio;
723 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
724 	struct sockaddr *sa = NULL;
725 	struct mbuf *control = NULL;
726 	int error;
727 
728 	error = copyin(uap->msg, (caddr_t)&msg, sizeof(msg));
729 	if (error)
730 		return (error);
731 
732 	/*
733 	 * Conditionally copyin msg.msg_name.
734 	 */
735 	if (msg.msg_name) {
736 		error = getsockaddr(&sa, msg.msg_name, msg.msg_namelen);
737 		if (error)
738 			return (error);
739 	}
740 
741 	/*
742 	 * Populate auio.
743 	 */
744 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
745 			     &auio.uio_resid);
746 	if (error)
747 		goto cleanup2;
748 	auio.uio_iov = iov;
749 	auio.uio_iovcnt = msg.msg_iovlen;
750 	auio.uio_offset = 0;
751 	auio.uio_segflg = UIO_USERSPACE;
752 	auio.uio_rw = UIO_WRITE;
753 	auio.uio_td = td;
754 
755 	/*
756 	 * Conditionally copyin msg.msg_control.
757 	 */
758 	if (msg.msg_control) {
759 		if (msg.msg_controllen < sizeof(struct cmsghdr) ||
760 		    msg.msg_controllen > MLEN) {
761 			error = EINVAL;
762 			goto cleanup;
763 		}
764 		control = m_get(MB_WAIT, MT_CONTROL);
765 		if (control == NULL) {
766 			error = ENOBUFS;
767 			goto cleanup;
768 		}
769 		control->m_len = msg.msg_controllen;
770 		error = copyin(msg.msg_control, mtod(control, caddr_t),
771 		    msg.msg_controllen);
772 		if (error) {
773 			m_free(control);
774 			goto cleanup;
775 		}
776 	}
777 
778 	error = kern_sendmsg(uap->s, sa, &auio, control, uap->flags,
779 	    &uap->sysmsg_result);
780 
781 cleanup:
782 	iovec_free(&iov, aiov);
783 cleanup2:
784 	if (sa)
785 		FREE(sa, M_SONAME);
786 	return (error);
787 }
788 
789 /*
790  * kern_recvmsg() takes a handle to sa and control.  If the handle is non-
791  * null, it returns a dynamically allocated struct sockaddr and an mbuf.
792  * Don't forget to FREE() and m_free() these if they are returned.
793  */
794 int
795 kern_recvmsg(int s, struct sockaddr **sa, struct uio *auio,
796     struct mbuf **control, int *flags, int *res)
797 {
798 	struct thread *td = curthread;
799 	struct proc *p = td->td_proc;
800 	struct file *fp;
801 	int len, error;
802 	int lflags;
803 	struct socket *so;
804 #ifdef KTRACE
805 	struct iovec *ktriov = NULL;
806 	struct uio ktruio;
807 #endif
808 
809 	error = holdsock(p->p_fd, s, &fp);
810 	if (error)
811 		return (error);
812 	if (auio->uio_resid < 0) {
813 		error = EINVAL;
814 		goto done;
815 	}
816 #ifdef KTRACE
817 	if (KTRPOINT(td, KTR_GENIO)) {
818 		int iovlen = auio->uio_iovcnt * sizeof (struct iovec);
819 
820 		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
821 		bcopy(auio->uio_iov, ktriov, iovlen);
822 		ktruio = *auio;
823 	}
824 #endif
825 	len = auio->uio_resid;
826 	so = (struct socket *)fp->f_data;
827 
828 	if (flags == NULL || (*flags & (MSG_FNONBLOCKING|MSG_FBLOCKING)) == 0) {
829 		if (fp->f_flag & FNONBLOCK) {
830 			if (flags) {
831 				*flags |= MSG_FNONBLOCKING;
832 			} else {
833 				lflags = MSG_FNONBLOCKING;
834 				flags = &lflags;
835 			}
836 		}
837 	}
838 
839 	error = so_pru_soreceive(so, sa, auio, NULL, control, flags);
840 	if (error) {
841 		if (auio->uio_resid != len && (error == ERESTART ||
842 		    error == EINTR || error == EWOULDBLOCK))
843 			error = 0;
844 	}
845 #ifdef KTRACE
846 	if (ktriov != NULL) {
847 		if (error == 0) {
848 			ktruio.uio_iov = ktriov;
849 			ktruio.uio_resid = len - auio->uio_resid;
850 			ktrgenio(p, s, UIO_READ, &ktruio, error);
851 		}
852 		FREE(ktriov, M_TEMP);
853 	}
854 #endif
855 	if (error == 0)
856 		*res = len - auio->uio_resid;
857 done:
858 	fdrop(fp);
859 	return (error);
860 }
861 
862 /*
863  * recvfrom_args(int s, caddr_t buf, size_t len, int flags,
864  *			caddr_t from, int *fromlenaddr)
865  */
866 int
867 sys_recvfrom(struct recvfrom_args *uap)
868 {
869 	struct thread *td = curthread;
870 	struct uio auio;
871 	struct iovec aiov;
872 	struct sockaddr *sa = NULL;
873 	int error, fromlen;
874 
875 	if (uap->from && uap->fromlenaddr) {
876 		error = copyin(uap->fromlenaddr, &fromlen, sizeof(fromlen));
877 		if (error)
878 			return (error);
879 		if (fromlen < 0)
880 			return (EINVAL);
881 	} else {
882 		fromlen = 0;
883 	}
884 	aiov.iov_base = uap->buf;
885 	aiov.iov_len = uap->len;
886 	auio.uio_iov = &aiov;
887 	auio.uio_iovcnt = 1;
888 	auio.uio_offset = 0;
889 	auio.uio_resid = uap->len;
890 	auio.uio_segflg = UIO_USERSPACE;
891 	auio.uio_rw = UIO_READ;
892 	auio.uio_td = td;
893 
894 	error = kern_recvmsg(uap->s, uap->from ? &sa : NULL, &auio, NULL,
895 	    &uap->flags, &uap->sysmsg_result);
896 
897 	if (error == 0 && uap->from) {
898 		/* note: sa may still be NULL */
899 		if (sa) {
900 			fromlen = MIN(fromlen, sa->sa_len);
901 			error = copyout(sa, uap->from, fromlen);
902 		} else {
903 			fromlen = 0;
904 		}
905 		if (error == 0) {
906 			error = copyout(&fromlen, uap->fromlenaddr,
907 					sizeof(fromlen));
908 		}
909 	}
910 	if (sa)
911 		FREE(sa, M_SONAME);
912 
913 	return (error);
914 }
915 
916 /*
917  * recvmsg_args(int s, struct msghdr *msg, int flags)
918  */
919 int
920 sys_recvmsg(struct recvmsg_args *uap)
921 {
922 	struct thread *td = curthread;
923 	struct msghdr msg;
924 	struct uio auio;
925 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
926 	struct mbuf *m, *control = NULL;
927 	struct sockaddr *sa = NULL;
928 	caddr_t ctlbuf;
929 	socklen_t *ufromlenp, *ucontrollenp;
930 	int error, fromlen, controllen, len, flags, *uflagsp;
931 
932 	/*
933 	 * This copyin handles everything except the iovec.
934 	 */
935 	error = copyin(uap->msg, &msg, sizeof(msg));
936 	if (error)
937 		return (error);
938 
939 	if (msg.msg_name && msg.msg_namelen < 0)
940 		return (EINVAL);
941 	if (msg.msg_control && msg.msg_controllen < 0)
942 		return (EINVAL);
943 
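	/*
	 * Editor's note: compute the userland addresses of the msg_namelen,
	 * msg_controllen and msg_flags fields within the user's msghdr so
	 * that the results can be copied out individually below.
	 */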
944 	ufromlenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
945 	    msg_namelen));
946 	ucontrollenp = (socklen_t *)((caddr_t)uap->msg + offsetof(struct msghdr,
947 	    msg_controllen));
948 	uflagsp = (int *)((caddr_t)uap->msg + offsetof(struct msghdr,
949 	    msg_flags));
950 
951 	/*
952 	 * Populate auio.
953 	 */
954 	error = iovec_copyin(msg.msg_iov, &iov, aiov, msg.msg_iovlen,
955 			     &auio.uio_resid);
956 	if (error)
957 		return (error);
958 	auio.uio_iov = iov;
959 	auio.uio_iovcnt = msg.msg_iovlen;
960 	auio.uio_offset = 0;
961 	auio.uio_segflg = UIO_USERSPACE;
962 	auio.uio_rw = UIO_READ;
963 	auio.uio_td = td;
964 
965 	flags = uap->flags;
966 
967 	error = kern_recvmsg(uap->s, msg.msg_name ? &sa : NULL, &auio,
968 	    msg.msg_control ? &control : NULL, &flags, &uap->sysmsg_result);
969 
970 	/*
971 	 * Conditionally copyout the name and populate the namelen field.
972 	 */
973 	if (error == 0 && msg.msg_name) {
974 		/* note: sa may still be NULL */
975 		if (sa != NULL) {
976 			fromlen = MIN(msg.msg_namelen, sa->sa_len);
977 			error = copyout(sa, msg.msg_name, fromlen);
978 		} else
979 			fromlen = 0;
980 		if (error == 0)
981 			error = copyout(&fromlen, ufromlenp,
982 			    sizeof(*ufromlenp));
983 	}
984 
985 	/*
986 	 * Copyout msg.msg_control and msg.msg_controllen.
987 	 */
988 	if (error == 0 && msg.msg_control) {
989 		len = msg.msg_controllen;
990 		m = control;
991 		ctlbuf = (caddr_t)msg.msg_control;
992 
993 		while(m && len > 0) {
994 			unsigned int tocopy;
995 
996 			if (len >= m->m_len) {
997 				tocopy = m->m_len;
998 			} else {
999 				msg.msg_flags |= MSG_CTRUNC;
1000 				tocopy = len;
1001 			}
1002 
1003 			error = copyout(mtod(m, caddr_t), ctlbuf, tocopy);
1004 			if (error)
1005 				goto cleanup;
1006 
1007 			ctlbuf += tocopy;
1008 			len -= tocopy;
1009 			m = m->m_next;
1010 		}
1011 		controllen = ctlbuf - (caddr_t)msg.msg_control;
1012 		error = copyout(&controllen, ucontrollenp,
1013 		    sizeof(*ucontrollenp));
1014 	}
1015 
1016 	if (error == 0)
1017 		error = copyout(&flags, uflagsp, sizeof(*uflagsp));
1018 
1019 cleanup:
1020 	if (sa)
1021 		FREE(sa, M_SONAME);
1022 	iovec_free(&iov, aiov);
1023 	if (control)
1024 		m_freem(control);
1025 	return (error);
1026 }
1027 
1028 /*
1029  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1030  * in kernel pointer instead of a userland pointer.  This allows us
1031  * to manipulate socket options in the emulation code.
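 *
 * In-kernel usage sketch (editor's illustration; the option chosen and the
 * variable names are merely examples):
 *
 *	struct sockopt sopt;
 *	int on = 1;
 *
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_REUSEADDR;
 *	sopt.sopt_val = &on;			(kernel address)
 *	sopt.sopt_valsize = sizeof(on);
 *	sopt.sopt_td = NULL;			(marks sopt_val as in-kernel)
 *	error = kern_setsockopt(s, &sopt);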
1032  */
1033 int
1034 kern_setsockopt(int s, struct sockopt *sopt)
1035 {
1036 	struct thread *td = curthread;
1037 	struct proc *p = td->td_proc;
1038 	struct file *fp;
1039 	int error;
1040 
1041 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1042 		return (EFAULT);
1043 	if (sopt->sopt_valsize < 0)
1044 		return (EINVAL);
1045 
1046 	error = holdsock(p->p_fd, s, &fp);
1047 	if (error)
1048 		return (error);
1049 
1050 	error = sosetopt((struct socket *)fp->f_data, sopt);
1051 	fdrop(fp);
1052 	return (error);
1053 }
1054 
1055 /*
1056  * setsockopt_args(int s, int level, int name, caddr_t val, int valsize)
1057  */
1058 int
1059 sys_setsockopt(struct setsockopt_args *uap)
1060 {
1061 	struct thread *td = curthread;
1062 	struct sockopt sopt;
1063 	int error;
1064 
1065 	sopt.sopt_level = uap->level;
1066 	sopt.sopt_name = uap->name;
1067 	sopt.sopt_val = uap->val;
1068 	sopt.sopt_valsize = uap->valsize;
1069 	sopt.sopt_td = td;
1070 
1071 	error = kern_setsockopt(uap->s, &sopt);
1072 	return(error);
1073 }
1074 
1075 /*
1076  * If sopt->sopt_td == NULL, then sopt->sopt_val is treated as an
1077  * in kernel pointer instead of a userland pointer.  This allows us
1078  * to manipulate socket options in the emulation code.
1079  */
1080 int
1081 kern_getsockopt(int s, struct sockopt *sopt)
1082 {
1083 	struct thread *td = curthread;
1084 	struct proc *p = td->td_proc;
1085 	struct file *fp;
1086 	int error;
1087 
1088 	if (sopt->sopt_val == 0 && sopt->sopt_valsize != 0)
1089 		return (EFAULT);
1090 	if (sopt->sopt_valsize < 0)
1091 		return (EINVAL);
1092 
1093 	error = holdsock(p->p_fd, s, &fp);
1094 	if (error)
1095 		return (error);
1096 
1097 	error = sogetopt((struct socket *)fp->f_data, sopt);
1098 	fdrop(fp);
1099 	return (error);
1100 }
1101 
1102 /*
1103  * getsockopt_args(int s, int level, int name, caddr_t val, int *avalsize)
1104  */
1105 int
1106 sys_getsockopt(struct getsockopt_args *uap)
1107 {
1108 	struct thread *td = curthread;
1109 	struct	sockopt sopt;
1110 	int	error, valsize;
1111 
1112 	if (uap->val) {
1113 		error = copyin(uap->avalsize, &valsize, sizeof(valsize));
1114 		if (error)
1115 			return (error);
1116 		if (valsize < 0)
1117 			return (EINVAL);
1118 	} else {
1119 		valsize = 0;
1120 	}
1121 
1122 	sopt.sopt_level = uap->level;
1123 	sopt.sopt_name = uap->name;
1124 	sopt.sopt_val = uap->val;
1125 	sopt.sopt_valsize = valsize;
1126 	sopt.sopt_td = td;
1127 
1128 	error = kern_getsockopt(uap->s, &sopt);
1129 	if (error == 0) {
1130 		valsize = sopt.sopt_valsize;
1131 		error = copyout(&valsize, uap->avalsize, sizeof(valsize));
1132 	}
1133 	return (error);
1134 }
1135 
1136 /*
1137  * The second argument to kern_getsockname() is a handle to a struct sockaddr.
1138  * This allows kern_getsockname() to return a pointer to an allocated struct
1139  * sockaddr which must be freed later with FREE().  The caller must
1140  * initialize *name to NULL.
1141  */
1142 int
1143 kern_getsockname(int s, struct sockaddr **name, int *namelen)
1144 {
1145 	struct thread *td = curthread;
1146 	struct proc *p = td->td_proc;
1147 	struct file *fp;
1148 	struct socket *so;
1149 	struct sockaddr *sa = NULL;
1150 	int error;
1151 
1152 	error = holdsock(p->p_fd, s, &fp);
1153 	if (error)
1154 		return (error);
1155 	if (*namelen < 0) {
1156 		fdrop(fp);
1157 		return (EINVAL);
1158 	}
1159 	so = (struct socket *)fp->f_data;
1160 	error = so_pru_sockaddr(so, &sa);
1161 	if (error == 0) {
1162 		if (sa == 0) {
1163 			*namelen = 0;
1164 		} else {
1165 			*namelen = MIN(*namelen, sa->sa_len);
1166 			*name = sa;
1167 		}
1168 	}
1169 
1170 	fdrop(fp);
1171 	return (error);
1172 }
1173 
1174 /*
1175  * getsockname_args(int fdes, caddr_t asa, int *alen)
1176  *
1177  * Get socket name.
1178  */
1179 int
1180 sys_getsockname(struct getsockname_args *uap)
1181 {
1182 	struct sockaddr *sa = NULL;
1183 	int error, sa_len;
1184 
1185 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1186 	if (error)
1187 		return (error);
1188 
1189 	error = kern_getsockname(uap->fdes, &sa, &sa_len);
1190 
1191 	if (error == 0)
1192 		error = copyout(sa, uap->asa, sa_len);
1193 	if (error == 0)
1194 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1195 	if (sa)
1196 		FREE(sa, M_SONAME);
1197 	return (error);
1198 }
1199 
1200 /*
1201  * The second argument to kern_getpeername() is a handle to a struct sockaddr.
1202  * This allows kern_getpeername() to return a pointer to an allocated struct
1203  * sockaddr which must be freed later with FREE().  The caller must
1204  * initialize *name to NULL.
1205  */
1206 int
1207 kern_getpeername(int s, struct sockaddr **name, int *namelen)
1208 {
1209 	struct thread *td = curthread;
1210 	struct proc *p = td->td_proc;
1211 	struct file *fp;
1212 	struct socket *so;
1213 	struct sockaddr *sa = NULL;
1214 	int error;
1215 
1216 	error = holdsock(p->p_fd, s, &fp);
1217 	if (error)
1218 		return (error);
1219 	if (*namelen < 0) {
1220 		fdrop(fp);
1221 		return (EINVAL);
1222 	}
1223 	so = (struct socket *)fp->f_data;
1224 	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
1225 		fdrop(fp);
1226 		return (ENOTCONN);
1227 	}
1228 	error = so_pru_peeraddr(so, &sa);
1229 	if (error == 0) {
1230 		if (sa == 0) {
1231 			*namelen = 0;
1232 		} else {
1233 			*namelen = MIN(*namelen, sa->sa_len);
1234 			*name = sa;
1235 		}
1236 	}
1237 
1238 	fdrop(fp);
1239 	return (error);
1240 }
1241 
1242 /*
1243  * getpeername_args(int fdes, caddr_t asa, int *alen)
1244  *
1245  * Get name of peer for connected socket.
1246  */
1247 int
1248 sys_getpeername(struct getpeername_args *uap)
1249 {
1250 	struct sockaddr *sa = NULL;
1251 	int error, sa_len;
1252 
1253 	error = copyin(uap->alen, &sa_len, sizeof(sa_len));
1254 	if (error)
1255 		return (error);
1256 
1257 	error = kern_getpeername(uap->fdes, &sa, &sa_len);
1258 
1259 	if (error == 0)
1260 		error = copyout(sa, uap->asa, sa_len);
1261 	if (error == 0)
1262 		error = copyout(&sa_len, uap->alen, sizeof(*uap->alen));
1263 	if (sa)
1264 		FREE(sa, M_SONAME);
1265 	return (error);
1266 }
1267 
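/*
 * Copy a sockaddr in from userland into a freshly allocated M_SONAME buffer,
 * validating the length against SOCK_MAXADDRLEN and the minimum sockaddr
 * header size.  On success *namp points at the new sockaddr, with sa_len
 * forced to the copied length; the caller must release it with
 * FREE(sa, M_SONAME).  (Editor's summary of getsockaddr() below.)
 */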
1268 int
1269 getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
1270 {
1271 	struct sockaddr *sa;
1272 	int error;
1273 
1274 	*namp = NULL;
1275 	if (len > SOCK_MAXADDRLEN)
1276 		return ENAMETOOLONG;
1277 	if (len < offsetof(struct sockaddr, sa_data[0]))
1278 		return EDOM;
1279 	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
1280 	error = copyin(uaddr, sa, len);
1281 	if (error) {
1282 		FREE(sa, M_SONAME);
1283 	} else {
1284 #if BYTE_ORDER != BIG_ENDIAN
1285 		/*
1286 		 * The bind(), connect(), and sendto() syscalls were not
1287 		 * versioned for COMPAT_43.  Thus, this check must stay.
1288 		 */
1289 		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
1290 			sa->sa_family = sa->sa_len;
1291 #endif
1292 		sa->sa_len = len;
1293 		*namp = sa;
1294 	}
1295 	return error;
1296 }
1297 
1298 /*
1299  * Detach a mapped page and release resources back to the system.
1300  * We must release our wiring and if the object is ripped out
1301  * from under the vm_page we become responsible for freeing the
1302  * page.  These routines must be MPSAFE.
1303  *
1304  * XXX HACK XXX TEMPORARY UNTIL WE IMPLEMENT EXT MBUF REFERENCE COUNTING
1305  *
1306  * XXX vm_page_*() routines are not MPSAFE yet, the MP lock is required.
1307  */
1308 static void
1309 sf_buf_mref(void *arg)
1310 {
1311 	struct sfbuf_mref *sfm = arg;
1312 
1313 	/*
1314 	 * We must already hold a ref so there is no race to 0, just
1315 	 * atomically increment the count.
1316 	 */
1317 	atomic_add_int(&sfm->mref_count, 1);
1318 }
1319 
1320 static void
1321 sf_buf_mfree(void *arg)
1322 {
1323 	struct sfbuf_mref *sfm = arg;
1324 	vm_page_t m;
1325 
1326 	KKASSERT(sfm->mref_count > 0);
1327 	if (sfm->mref_count == 1) {
1328 		/*
1329 		 * We are the only holder, so no further locking is required;
1330 		 * the sfbuf can simply be freed.
1331 		 */
1332 		sfm->mref_count = 0;
1333 		goto freeit;
1334 	} else {
1335 		/*
1336 		 * There may be other holders, we must obtain the serializer
1337 		 * to protect against a sf_buf_mfree() race to 0.  An atomic
1338 		 * operation is still required for races against
1339 		 * sf_buf_mref().
1340 		 *
1341 		 * XXX vm_page_*() and SFBUF routines not MPSAFE yet.
1342 		 */
1343 		lwkt_serialize_enter(&sfm->serializer);
1344 		atomic_subtract_int(&sfm->mref_count, 1);
1345 		if (sfm->mref_count == 0) {
1346 			lwkt_serialize_exit(&sfm->serializer);
1347 freeit:
1348 			get_mplock();
1349 			crit_enter();
1350 			m = sf_buf_page(sfm->sf);
1351 			sf_buf_free(sfm->sf);
1352 			vm_page_unwire(m, 0);
1353 			if (m->wire_count == 0 && m->object == NULL)
1354 				vm_page_try_to_free(m);
1355 			crit_exit();
1356 			rel_mplock();
1357 			kfree(sfm, M_SENDFILE);
1358 		} else {
1359 			lwkt_serialize_exit(&sfm->serializer);
1360 		}
1361 	}
1362 }
1363 
1364 /*
1365  * sendfile(2).
1366  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
1367  *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
1368  *
1369  * Send a file specified by 'fd' and starting at 'offset' to a socket
1370  * specified by 's'. Send only 'nbytes' of the file or until EOF if
1371  * nbytes == 0. Optionally add a header and/or trailer to the socket
1372  * output. If specified, write the total number of bytes sent into *sbytes.
1373  *
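 * Userland usage sketch (editor's illustration; the descriptors are assumed
 * to be open and error handling is omitted):
 *
 *	off_t sent;
 *
 *	sendfile(filefd, sockfd, 0, 0, NULL, &sent, 0);
 *
 * which sends the whole file starting at offset 0 until EOF and reports the
 * number of bytes sent in 'sent'.
 *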
1374  * In FreeBSD kern/uipc_syscalls.c,v 1.103, a bug was fixed that caused
1375  * the headers to count against the remaining bytes to be sent from
1376  * the file descriptor.  We may wish to implement a compatibility syscall
1377  * in the future.
1378  */
1379 int
1380 sys_sendfile(struct sendfile_args *uap)
1381 {
1382 	struct thread *td = curthread;
1383 	struct proc *p = td->td_proc;
1384 	struct file *fp;
1385 	struct vnode *vp = NULL;
1386 	struct sf_hdtr hdtr;
1387 	struct iovec aiov[UIO_SMALLIOV], *iov = NULL;
1388 	struct uio auio;
1389 	struct mbuf *mheader = NULL;
1390 	off_t hdtr_size = 0, sbytes;
1391 	int error, hbytes = 0, tbytes;
1392 
1393 	KKASSERT(p);
1394 
1395 	/*
1396 	 * Do argument checking: the input must be a regular file, the
1397 	 * output a stream-type connected socket, and the offset non-negative.
1398 	 */
1399 	fp = holdfp(p->p_fd, uap->fd, FREAD);
1400 	if (fp == NULL) {
1401 		return (EBADF);
1402 	}
1403 	if (fp->f_type != DTYPE_VNODE) {
1404 		fdrop(fp);
1405 		return (EINVAL);
1406 	}
1407 	vp = (struct vnode *)fp->f_data;
1408 	vref(vp);
1409 	fdrop(fp);
1410 
1411 	/*
1412 	 * If specified, get the pointer to the sf_hdtr struct for
1413 	 * any headers/trailers.
1414 	 */
1415 	if (uap->hdtr) {
1416 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
1417 		if (error)
1418 			goto done;
1419 		/*
1420 		 * Send any headers.
1421 		 */
1422 		if (hdtr.headers) {
1423 			error = iovec_copyin(hdtr.headers, &iov, aiov,
1424 					     hdtr.hdr_cnt, &hbytes);
1425 			if (error)
1426 				goto done;
1427 			auio.uio_iov = iov;
1428 			auio.uio_iovcnt = hdtr.hdr_cnt;
1429 			auio.uio_offset = 0;
1430 			auio.uio_segflg = UIO_USERSPACE;
1431 			auio.uio_rw = UIO_WRITE;
1432 			auio.uio_td = td;
1433 			auio.uio_resid = hbytes;
1434 
1435 			mheader = m_uiomove(&auio);
1436 
1437 			iovec_free(&iov, aiov);
1438 			if (mheader == NULL)
1439 				goto done;
1440 		}
1441 	}
1442 
1443 	error = kern_sendfile(vp, uap->s, uap->offset, uap->nbytes, mheader,
1444 	    &sbytes, uap->flags);
1445 	if (error)
1446 		goto done;
1447 
1448 	/*
1449 	 * Send trailers. Wimp out and use writev(2).
1450 	 */
1451 	if (uap->hdtr != NULL && hdtr.trailers != NULL) {
1452 		error = iovec_copyin(hdtr.trailers, &iov, aiov,
1453 				     hdtr.trl_cnt, &auio.uio_resid);
1454 		if (error)
1455 			goto done;
1456 		auio.uio_iov = iov;
1457 		auio.uio_iovcnt = hdtr.trl_cnt;
1458 		auio.uio_offset = 0;
1459 		auio.uio_segflg = UIO_USERSPACE;
1460 		auio.uio_rw = UIO_WRITE;
1461 		auio.uio_td = td;
1462 
1463 		error = kern_sendmsg(uap->s, NULL, &auio, NULL, 0, &tbytes);
1464 
1465 		iovec_free(&iov, aiov);
1466 		if (error)
1467 			goto done;
1468 		hdtr_size += tbytes;	/* trailer bytes successfully sent */
1469 	}
1470 
1471 done:
1472 	if (uap->sbytes != NULL) {
1473 		sbytes += hdtr_size;
1474 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
1475 	}
1476 	if (vp)
1477 		vrele(vp);
1478 	return (error);
1479 }
1480 
1481 int
1482 kern_sendfile(struct vnode *vp, int sfd, off_t offset, size_t nbytes,
1483     struct mbuf *mheader, off_t *sbytes, int flags)
1484 {
1485 	struct thread *td = curthread;
1486 	struct proc *p = td->td_proc;
1487 	struct vm_object *obj;
1488 	struct socket *so;
1489 	struct file *fp;
1490 	struct mbuf *m;
1491 	struct sf_buf *sf;
1492 	struct sfbuf_mref *sfm;
1493 	struct vm_page *pg;
1494 	off_t off, xfsize;
1495 	off_t hbytes = 0;
1496 	int error = 0;
1497 
1498 	if (vp->v_type != VREG) {
1499 		error = EINVAL;
1500 		goto done0;
1501 	}
1502 	if ((obj = vp->v_object) == NULL) {
1503 		error = EINVAL;
1504 		goto done0;
1505 	}
1506 	error = holdsock(p->p_fd, sfd, &fp);
1507 	if (error)
1508 		goto done0;
1509 	so = (struct socket *)fp->f_data;
1510 	if (so->so_type != SOCK_STREAM) {
1511 		error = EINVAL;
1512 		goto done;
1513 	}
1514 	if ((so->so_state & SS_ISCONNECTED) == 0) {
1515 		error = ENOTCONN;
1516 		goto done;
1517 	}
1518 	if (offset < 0) {
1519 		error = EINVAL;
1520 		goto done;
1521 	}
1522 
1523 	*sbytes = 0;
1524 	/*
1525 	 * Protect against multiple writers to the socket.
1526 	 */
1527 	ssb_lock(&so->so_snd, M_WAITOK);
1528 
1529 	/*
1530 	 * Loop through the pages in the file, starting with the requested
1531 	 * offset. Get a file page (do I/O if necessary), map the file page
1532 	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
1533 	 * it on the socket.
1534 	 */
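	 *
	 * Editor's note: the for(;;) increment below advances the file
	 * offset by the bytes just queued (xfsize) and adds them, plus any
	 * header bytes prepended on that pass (hbytes), to *sbytes.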
1535 	for (off = offset; ; off += xfsize, *sbytes += xfsize + hbytes) {
1536 		vm_pindex_t pindex;
1537 		vm_offset_t pgoff;
1538 
1539 		pindex = OFF_TO_IDX(off);
1540 retry_lookup:
1541 		/*
1542 		 * Calculate the amount to transfer. Not to exceed a page,
1543 		 * the EOF, or the passed in nbytes.
1544 		 */
1545 		xfsize = vp->v_filesize - off;
1546 		if (xfsize > PAGE_SIZE)
1547 			xfsize = PAGE_SIZE;
1548 		pgoff = (vm_offset_t)(off & PAGE_MASK);
1549 		if (PAGE_SIZE - pgoff < xfsize)
1550 			xfsize = PAGE_SIZE - pgoff;
1551 		if (nbytes && xfsize > (nbytes - *sbytes))
1552 			xfsize = nbytes - *sbytes;
1553 		if (xfsize <= 0)
1554 			break;
1555 		/*
1556 		 * Optimize the non-blocking case by looking at the socket space
1557 		 * before going to the extra work of constituting the sf_buf.
1558 		 */
1559 		if ((fp->f_flag & FNONBLOCK) && ssb_space(&so->so_snd) <= 0) {
1560 			if (so->so_state & SS_CANTSENDMORE)
1561 				error = EPIPE;
1562 			else
1563 				error = EAGAIN;
1564 			ssb_unlock(&so->so_snd);
1565 			goto done;
1566 		}
1567 		/*
1568 		 * Attempt to look up the page.
1569 		 *
1570 		 *	Allocate if not found, wait and loop if busy, then
1571 		 *	wire the page.  critical section protection is
1572 		 * 	required to maintain the object association (an
1573 		 *	interrupt can free the page) through to the
1574 		 *	vm_page_wire() call.
1575 		 */
1576 		crit_enter();
1577 		pg = vm_page_lookup(obj, pindex);
1578 		if (pg == NULL) {
1579 			pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
1580 			if (pg == NULL) {
1581 				vm_wait();
1582 				crit_exit();
1583 				goto retry_lookup;
1584 			}
1585 			vm_page_wakeup(pg);
1586 		} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
1587 			crit_exit();
1588 			goto retry_lookup;
1589 		}
1590 		vm_page_wire(pg);
1591 		crit_exit();
1592 
1593 		/*
1594 		 * If page is not valid for what we need, initiate I/O
1595 		 */
1596 
1597 		if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
1598 			struct uio auio;
1599 			struct iovec aiov;
1600 			int bsize;
1601 
1602 			/*
1603 			 * Ensure that our page is still around when the I/O
1604 			 * completes.
1605 			 */
1606 			vm_page_io_start(pg);
1607 
1608 			/*
1609 			 * Get the page from backing store.
1610 			 */
1611 			bsize = vp->v_mount->mnt_stat.f_iosize;
1612 			auio.uio_iov = &aiov;
1613 			auio.uio_iovcnt = 1;
1614 			aiov.iov_base = 0;
1615 			aiov.iov_len = MAXBSIZE;
1616 			auio.uio_resid = MAXBSIZE;
1617 			auio.uio_offset = trunc_page(off);
1618 			auio.uio_segflg = UIO_NOCOPY;
1619 			auio.uio_rw = UIO_READ;
1620 			auio.uio_td = td;
1621 			vn_lock(vp, LK_SHARED | LK_RETRY);
1622 			error = VOP_READ(vp, &auio,
1623 				    IO_VMIO | ((MAXBSIZE / bsize) << 16),
1624 				    p->p_ucred);
1625 			vn_unlock(vp);
1626 			vm_page_flag_clear(pg, PG_ZERO);
1627 			vm_page_io_finish(pg);
1628 			if (error) {
1629 				crit_enter();
1630 				vm_page_unwire(pg, 0);
1631 				vm_page_try_to_free(pg);
1632 				crit_exit();
1633 				ssb_unlock(&so->so_snd);
1634 				goto done;
1635 			}
1636 		}
1637 
1638 
1639 		/*
1640 		 * Get a sendfile buf. We usually wait as long as necessary,
1641 		 * but this wait can be interrupted.
1642 		 */
1643 		if ((sf = sf_buf_alloc(pg, SFB_CATCH)) == NULL) {
1644 			crit_enter();
1645 			vm_page_unwire(pg, 0);
1646 			vm_page_try_to_free(pg);
1647 			crit_exit();
1648 			ssb_unlock(&so->so_snd);
1649 			error = EINTR;
1650 			goto done;
1651 		}
1652 
1653 		/*
1654 		 * Get an mbuf header and set it up as having external storage.
1655 		 */
1656 		MGETHDR(m, MB_WAIT, MT_DATA);
1657 		if (m == NULL) {
1658 			error = ENOBUFS;
1659 			sf_buf_free(sf);
1660 			ssb_unlock(&so->so_snd);
1661 			goto done;
1662 		}
1663 
1664 		/*
1665 		 * sfm is a temporary hack, use a per-cpu cache for this.
1666 		 */
1667 		sfm = kmalloc(sizeof(struct sfbuf_mref), M_SENDFILE, M_WAITOK);
1668 		sfm->sf = sf;
1669 		sfm->mref_count = 1;
1670 		lwkt_serialize_init(&sfm->serializer);
1671 
1672 		m->m_ext.ext_free = sf_buf_mfree;
1673 		m->m_ext.ext_ref = sf_buf_mref;
1674 		m->m_ext.ext_arg = sfm;
1675 		m->m_ext.ext_buf = (void *)sf->kva;
1676 		m->m_ext.ext_size = PAGE_SIZE;
1677 		m->m_data = (char *) sf->kva + pgoff;
1678 		m->m_flags |= M_EXT;
1679 		m->m_pkthdr.len = m->m_len = xfsize;
1680 		KKASSERT((m->m_flags & (M_EXT_CLUSTER)) == 0);
1681 
1682 		if (mheader != NULL) {
1683 			hbytes = mheader->m_pkthdr.len;
1684 			mheader->m_pkthdr.len += m->m_pkthdr.len;
1685 			m_cat(mheader, m);
1686 			m = mheader;
1687 			mheader = NULL;
1688 		} else
1689 			hbytes = 0;
1690 
1691 		/*
1692 		 * Add the buffer to the socket buffer chain.
1693 		 */
1694 		crit_enter();
1695 retry_space:
1696 		/*
1697 		 * Make sure that the socket is still able to take more data.
1698 		 * CANTSENDMORE being true usually means that the connection
1699 		 * was closed. so_error is non-zero when an error was sensed after
1700 		 * a previous send.
1701 		 * The state is checked after the page mapping and buffer
1702 		 * allocation above since those operations may block and make
1703 		 * any socket checks stale. From this point forward, nothing
1704 		 * blocks before the pru_send (or more accurately, any blocking
1705 		 * results in a loop back to here to re-check).
1706 		 */
1707 		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
1708 			if (so->so_state & SS_CANTSENDMORE) {
1709 				error = EPIPE;
1710 			} else {
1711 				error = so->so_error;
1712 				so->so_error = 0;
1713 			}
1714 			m_freem(m);
1715 			ssb_unlock(&so->so_snd);
1716 			crit_exit();
1717 			goto done;
1718 		}
1719 		/*
1720 		 * Wait for socket space to become available. We do this just
1721 		 * after checking the connection state above in order to avoid
1722 		 * a race condition with ssb_wait().
1723 		 */
1724 		if (ssb_space(&so->so_snd) < so->so_snd.ssb_lowat) {
1725 			if (fp->f_flag & FNONBLOCK) {
1726 				m_freem(m);
1727 				ssb_unlock(&so->so_snd);
1728 				crit_exit();
1729 				error = EAGAIN;
1730 				goto done;
1731 			}
1732 			error = ssb_wait(&so->so_snd);
1733 			/*
1734 			 * An error from ssb_wait usually indicates that we've
1735 			 * been interrupted by a signal. If we've sent anything
1736 			 * then return bytes sent, otherwise return the error.
1737 			 */
1738 			if (error) {
1739 				m_freem(m);
1740 				ssb_unlock(&so->so_snd);
1741 				crit_exit();
1742 				goto done;
1743 			}
1744 			goto retry_space;
1745 		}
1746 		error = so_pru_send(so, 0, m, NULL, NULL, td);
1747 		crit_exit();
1748 		if (error) {
1749 			ssb_unlock(&so->so_snd);
1750 			goto done;
1751 		}
1752 	}
1753 	if (mheader != NULL) {
1754 		*sbytes += mheader->m_pkthdr.len;
1755 		error = so_pru_send(so, 0, mheader, NULL, NULL, td);
1756 		mheader = NULL;
1757 	}
1758 	ssb_unlock(&so->so_snd);
1759 
1760 done:
1761 	fdrop(fp);
1762 done0:
1763 	if (mheader != NULL)
1764 		m_freem(mheader);
1765 	return (error);
1766 }
1767 
1768 int
1769 sys_sctp_peeloff(struct sctp_peeloff_args *uap)
1770 {
1771 #ifdef SCTP
1772 	struct thread *td = curthread;
1773 	struct proc *p = td->td_proc;
1774 	struct file *lfp = NULL;
1775 	struct file *nfp = NULL;
1776 	int error;
1777 	struct socket *head, *so;
1778 	caddr_t assoc_id;
1779 	int fd;
1780 	short fflag;		/* type must match fp->f_flag */
1781 
1782 	assoc_id = uap->name;
1783 	error = holdsock(p->p_fd, uap->sd, &lfp);
1784 	if (error) {
1785 		return (error);
1786 	}
1787 	crit_enter();
1788 	head = (struct socket *)lfp->f_data;
1789 	error = sctp_can_peel_off(head, assoc_id);
1790 	if (error) {
1791 		crit_exit();
1792 		goto done;
1793 	}
1794 	/*
1795 	 * At this point we know we do have an association to peel off,
1796 	 * so we proceed to set up the fd.  This may block,
1797 	 * but that is ok.
1798 	 */
1799 
1800 	fflag = lfp->f_flag;
1801 	error = falloc(p, &nfp, &fd);
1802 	if (error) {
1803 		/*
1804 		 * Probably ran out of file descriptors. Put the
1805 		 * unaccepted connection back onto the queue and
1806 		 * do another wakeup so some other process might
1807 		 * have a chance at it.
1808 		 */
1809 		crit_exit();
1810 		goto done;
1811 	}
1812 	uap->sysmsg_result = fd;
1813 
1814 	so = sctp_get_peeloff(head, assoc_id, &error);
1815 	if (so == NULL) {
1816 		/*
1817 		 * Either someone else peeled it off OR
1818 		 * we can't get a socket.
1819 		 */
1820 		goto noconnection;
1821 	}
1822 	so->so_state &= ~SS_COMP;
1823 	so->so_state &= ~SS_NOFDREF;
1824 	so->so_head = NULL;
1825 	if (head->so_sigio != NULL)
1826 		fsetown(fgetown(head->so_sigio), &so->so_sigio);
1827 
1828 	nfp->f_type = DTYPE_SOCKET;
1829 	nfp->f_flag = fflag;
1830 	nfp->f_ops = &socketops;
1831 	nfp->f_data = so;
1832 
1833 noconnection:
1834 	/*
1835 	 * Assign the file pointer to the reserved descriptor, or clear
1836 	 * the reserved descriptor if an error occurred.
1837 	 */
1838 	if (error)
1839 		fsetfd(p, NULL, fd);
1840 	else
1841 		fsetfd(p, nfp, fd);
1842 	crit_exit();
1843 	/*
1844 	 * Release explicitly held references before returning.
1845 	 */
1846 done:
1847 	if (nfp != NULL)
1848 		fdrop(nfp);
1849 	fdrop(lfp);
1850 	return (error);
1851 #else /* SCTP */
1852 	return(EOPNOTSUPP);
1853 #endif /* SCTP */
1854 }
1855