xref: /original-bsd/sys/kern/uipc_socket.c (revision 252ddc1c)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)uipc_socket.c	7.20 (Berkeley) 06/22/90
18  */
19 
20 #include "param.h"
21 #include "user.h"
22 #include "proc.h"
23 #include "file.h"
24 #include "malloc.h"
25 #include "mbuf.h"
26 #include "domain.h"
27 #include "protosw.h"
28 #include "socket.h"
29 #include "socketvar.h"
30 
31 /*
32  * Socket operation routines.
33  * These routines are called by the routines in
34  * sys_socket.c or from a system process, and
35  * implement the semantics of socket operations by
36  * switching out to the protocol specific routines.
37  *
38  * TODO:
39  *	test socketpair
40  *	clean up async
41  *	out-of-band is a kludge
42  */
/*
 * Create a socket of the given type in domain "dom", returning it in *aso.
 * If "proto" is nonzero the protocol is looked up explicitly; otherwise the
 * first protocol of the requested type in the domain is used.  Returns 0 or
 * an errno (EPROTONOSUPPORT, EPROTOTYPE, or whatever PRU_ATTACH reports).
 */
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = type;
	/* sockets created by the superuser are marked privileged */
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	/*
	 * Ask the protocol to attach and initialize its control block;
	 * the protocol number rides in the "nam" argument slot.
	 */
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		/* mark no-fd-reference so sofree() will actually discard it */
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}
78 
79 sobind(so, nam)
80 	struct socket *so;
81 	struct mbuf *nam;
82 {
83 	int s = splnet();
84 	int error;
85 
86 	error =
87 	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
88 		(struct mbuf *)0, nam, (struct mbuf *)0);
89 	splx(s);
90 	return (error);
91 }
92 
93 solisten(so, backlog)
94 	register struct socket *so;
95 	int backlog;
96 {
97 	int s = splnet(), error;
98 
99 	error =
100 	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
101 		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
102 	if (error) {
103 		splx(s);
104 		return (error);
105 	}
106 	if (so->so_q == 0)
107 		so->so_options |= SO_ACCEPTCONN;
108 	if (backlog < 0)
109 		backlog = 0;
110 	so->so_qlimit = min(backlog, SOMAXCONN);
111 	splx(s);
112 	return (0);
113 }
114 
/*
 * Release a socket that has no remaining references.
 * A socket that still has a protocol control block, or is still
 * referenced by a file descriptor, is left alone.  Otherwise it is
 * removed from its listener's accept queues (if any), its buffers
 * are released, and its storage is freed.
 */
sofree(so)
	register struct socket *so;
{

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		/* dequeue from the incomplete (q0) or completed (q) queue */
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}
130 
131 /*
132  * Close a socket on last file table reference removal.
133  * Initiate disconnect if connected.
134  * Free socket when disconnect complete.
135  */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		/* abort every connection still queued for accept */
		while (so->so_q0)
			(void) soabort(so->so_q0);
		while (so->so_q)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			/*
			 * Non-blocking socket with a disconnect already in
			 * progress: don't wait for it to complete.
			 */
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			/*
			 * Otherwise linger (up to so_linger ticks per sleep)
			 * for the disconnect; a caught signal or timeout
			 * from tsleep() cuts the wait short.
			 */
			while (so->so_state & SS_ISCONNECTED)
				if (error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, netcls, so->so_linger))
					break;
		}
	}
drop:
	if (so->so_pcb) {
		/* detach the protocol control block; keep any earlier error */
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
182 
183 /*
184  * Must be called at splnet...
185  */
186 soabort(so)
187 	struct socket *so;
188 {
189 
190 	return (
191 	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
192 		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
193 }
194 
195 soaccept(so, nam)
196 	register struct socket *so;
197 	struct mbuf *nam;
198 {
199 	int s = splnet();
200 	int error;
201 
202 	if ((so->so_state & SS_NOFDREF) == 0)
203 		panic("soaccept: !NOFDREF");
204 	so->so_state &= ~SS_NOFDREF;
205 	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
206 	    (struct mbuf *)0, nam, (struct mbuf *)0);
207 	splx(s);
208 	return (error);
209 }
210 
/*
 * Initiate a connection to the address in "nam".
 * Fails with EOPNOTSUPP on a listening socket, and with EISCONN
 * when a connection-based protocol is already connected/connecting.
 */
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	/* NOTE(review): a sodisconnect() failure is reported as EISCONN,
	 * masking the disconnect's own errno — confirm intended. */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}
237 
238 soconnect2(so1, so2)
239 	register struct socket *so1;
240 	struct socket *so2;
241 {
242 	int s = splnet();
243 	int error;
244 
245 	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
246 	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
247 	splx(s);
248 	return (error);
249 }
250 
251 sodisconnect(so)
252 	register struct socket *so;
253 {
254 	int s = splnet();
255 	int error;
256 
257 	if ((so->so_state & SS_ISCONNECTED) == 0) {
258 		error = ENOTCONN;
259 		goto bad;
260 	}
261 	if (so->so_state & SS_ISDISCONNECTING) {
262 		error = EALREADY;
263 		goto bad;
264 	}
265 	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
266 	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
267 bad:
268 	splx(s);
269 	return (error);
270 }
271 
272 /*
273  * Send on a socket.
274  * If send must go all at once and message is larger than
275  * send buffering, then hard error.
276  * Lock against other senders.
277  * If must go all at once and not enough room now, then
278  * inform user that this would block and do nothing.
279  * Otherwise, if nonblocking, send as much as possible.
280  * The data to be sent is described by "uio" if nonzero,
281  * otherwise by the mbuf chain "top" (which must be null
282  * if uio is not).  Data provided in mbuf chain must be small
283  * enough to send all at once.
284  *
285  * Returns nonzero on error, timeout or signal; callers
286  * must check for short counts if EINTR/ERESTART are returned.
287  * Data and control buffers are freed on return.
288  */
289 sosend(so, addr, uio, top, control, flags)
290 	register struct socket *so;
291 	struct mbuf *addr;
292 	struct uio *uio;
293 	struct mbuf *top;
294 	struct mbuf *control;
295 	int flags;
296 {
297 	struct mbuf **mp;
298 	register struct mbuf *m;
299 	register long space, len, resid;
300 	int clen = 0, error, s, dontroute, mlen;
301 	int atomic = sosendallatonce(so) || top;
302 
303 	if (uio)
304 		resid = uio->uio_resid;
305 	else
306 		resid = top->m_pkthdr.len;
307 	dontroute =
308 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
309 	    (so->so_proto->pr_flags & PR_ATOMIC);
310 	u.u_ru.ru_msgsnd++;
311 	if (control)
312 		clen = control->m_len;
313 #define	snderr(errno)	{ error = errno; splx(s); goto release; }
314 
315 restart:
316 	if (error = sblock(&so->so_snd))
317 		goto out;
318 	do {
319 		s = splnet();
320 		if (so->so_state & SS_CANTSENDMORE)
321 			snderr(EPIPE);
322 		if (so->so_error)
323 			snderr(so->so_error);
324 		if ((so->so_state & SS_ISCONNECTED) == 0) {
325 			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
326 				if ((so->so_state & SS_ISCONFIRMING) == 0)
327 					snderr(ENOTCONN);
328 			} else if (addr == 0)
329 				snderr(EDESTADDRREQ);
330 		}
331 		space = sbspace(&so->so_snd);
332 		if (flags & MSG_OOB)
333 			space += 1024;
334 		if (space < resid + clen &&
335 		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
336 			if (atomic && resid > so->so_snd.sb_hiwat ||
337 			    clen > so->so_snd.sb_hiwat)
338 				snderr(EMSGSIZE);
339 			if (so->so_state & SS_NBIO)
340 				snderr(EWOULDBLOCK);
341 			sbunlock(&so->so_snd);
342 			error = sbwait(&so->so_snd);
343 			splx(s);
344 			if (error)
345 				goto out;
346 			goto restart;
347 		}
348 		splx(s);
349 		mp = &top;
350 		space -= clen;
351 		do {
352 		    if (uio == NULL) {
353 			/*
354 			 * Data is prepackaged in "top".
355 			 */
356 			resid = 0;
357 			if (flags & MSG_EOR)
358 				top->m_flags |= M_EOR;
359 		    } else do {
360 			if (top == 0) {
361 				MGETHDR(m, M_WAIT, MT_DATA);
362 				mlen = MHLEN;
363 				m->m_pkthdr.len = 0;
364 				m->m_pkthdr.rcvif = (struct ifnet *)0;
365 			} else {
366 				MGET(m, M_WAIT, MT_DATA);
367 				mlen = MLEN;
368 			}
369 			if (resid >= MINCLSIZE && space >= MCLBYTES) {
370 				MCLGET(m, M_WAIT);
371 				if ((m->m_flags & M_EXT) == 0)
372 					goto nopages;
373 				mlen = MCLBYTES;
374 #ifdef	MAPPED_MBUFS
375 				len = min(MCLBYTES, resid);
376 #else
377 				if (top == 0) {
378 					len = min(MCLBYTES - max_hdr, resid);
379 					m->m_data += max_hdr;
380 				}
381 #endif
382 				space -= MCLBYTES;
383 			} else {
384 nopages:
385 				len = min(min(mlen, resid), space);
386 				space -= len;
387 				/*
388 				 * For datagram protocols, leave room
389 				 * for protocol headers in first mbuf.
390 				 */
391 				if (atomic && top == 0 && len < mlen)
392 					MH_ALIGN(m, len);
393 			}
394 			error = uiomove(mtod(m, caddr_t), len, uio);
395 			resid = uio->uio_resid;
396 			m->m_len = len;
397 			*mp = m;
398 			top->m_pkthdr.len += len;
399 			if (error)
400 				goto release;
401 			mp = &m->m_next;
402 			if (resid <= 0) {
403 				if (flags & MSG_EOR)
404 					top->m_flags |= M_EOR;
405 				break;
406 			}
407 		    } while (space > 0 && atomic);
408 		    if (dontroute)
409 			    so->so_options |= SO_DONTROUTE;
410 		    s = splnet();				/* XXX */
411 		    error = (*so->so_proto->pr_usrreq)(so,
412 			(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
413 			top, addr, control);
414 		    splx(s);
415 		    if (dontroute)
416 			    so->so_options &= ~SO_DONTROUTE;
417 		    clen = 0;
418 		    control = 0;
419 		    top = 0;
420 		    mp = &top;
421 		    if (error)
422 			goto release;
423 		} while (resid && space > 0);
424 	} while (resid);
425 
426 release:
427 	sbunlock(&so->so_snd);
428 out:
429 	if (top)
430 		m_freem(top);
431 	if (control)
432 		m_freem(control);
433 	return (error);
434 }
435 
436 /*
437  * Implement receive operations on a socket.
438  * We depend on the way that records are added to the sockbuf
439  * by sbappend*.  In particular, each record (mbufs linked through m_next)
440  * must begin with an address if the protocol so specifies,
441  * followed by an optional mbuf or mbufs containing ancillary data,
442  * and then zero or more mbufs of data.
443  * In order to avoid blocking network interrupts for the entire time here,
444  * we splx() while doing the actual copy to user space.
445  * Although the sockbuf is locked, new data may still be appended,
446  * and thus we must maintain consistency of the sockbuf during that time.
447  *
448  * The caller may receive the data as a single mbuf chain by supplying
449  * an mbuf **mp0 for use in returning the chain.  The uio is then used
450  * only for the count in uio_resid.
451  */
452 soreceive(so, paddr, uio, mp0, controlp, flagsp)
453 	register struct socket *so;
454 	struct mbuf **paddr;
455 	struct uio *uio;
456 	struct mbuf **mp0;
457 	struct mbuf **controlp;
458 	int *flagsp;
459 {
460 	register struct mbuf *m, **mp;
461 	register int flags, len, error, s, offset;
462 	struct protosw *pr = so->so_proto;
463 	struct mbuf *nextrecord;
464 	int moff, type;
465 
466 	mp = mp0;
467 	if (paddr)
468 		*paddr = 0;
469 	if (controlp)
470 		*controlp = 0;
471 	if (flagsp)
472 		flags = *flagsp &~ MSG_EOR;
473 	else
474 		flags = 0;
475 	if (flags & MSG_OOB) {
476 		m = m_get(M_WAIT, MT_DATA);
477 		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
478 		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
479 		if (error)
480 			goto bad;
481 		do {
482 			error = uiomove(mtod(m, caddr_t),
483 			    (int) min(uio->uio_resid, m->m_len), uio);
484 			m = m_free(m);
485 		} while (uio->uio_resid && error == 0 && m);
486 bad:
487 		if (m)
488 			m_freem(m);
489 		return (error);
490 	}
491 	if (mp)
492 		*mp = (struct mbuf *)0;
493 	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
494 		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
495 		    (struct mbuf *)0, (struct mbuf *)0);
496 
497 restart:
498 	if (error = sblock(&so->so_rcv))
499 		return (error);
500 	s = splnet();
501 
502 	m = so->so_rcv.sb_mb;
503 	if (m == 0 || (so->so_rcv.sb_cc < uio->uio_resid &&
504 	    so->so_rcv.sb_cc < so->so_rcv.sb_lowat) ||
505 	    ((flags & MSG_WAITALL) && so->so_rcv.sb_cc < uio->uio_resid &&
506 	    so->so_rcv.sb_hiwat >= uio->uio_resid && !sosendallatonce(so))) {
507 #ifdef DIAGNOSTIC
508 		if (m == 0 && so->so_rcv.sb_cc)
509 			panic("receive 1");
510 #endif
511 		if (so->so_error) {
512 			error = so->so_error;
513 			so->so_error = 0;
514 			goto release;
515 		}
516 		if (so->so_state & SS_CANTRCVMORE)
517 			goto release;
518 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
519 		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
520 			error = ENOTCONN;
521 			goto release;
522 		}
523 		if (uio->uio_resid == 0)
524 			goto release;
525 		if (so->so_state & SS_NBIO) {
526 			error = EWOULDBLOCK;
527 			goto release;
528 		}
529 		sbunlock(&so->so_rcv);
530 		error = sbwait(&so->so_rcv);
531 		splx(s);
532 		if (error)
533 			return (error);
534 		goto restart;
535 	}
536 	u.u_ru.ru_msgrcv++;
537 #ifdef DIAGNOSTIC
538 if (m->m_type == 0)
539 panic("receive 3a");
540 #endif
541 	nextrecord = m->m_nextpkt;
542 	if (pr->pr_flags & PR_ADDR) {
543 #ifdef DIAGNOSTIC
544 		if (m->m_type != MT_SONAME)
545 			panic("receive 1a");
546 #endif
547 		if (flags & MSG_PEEK) {
548 			if (paddr)
549 				*paddr = m_copy(m, 0, m->m_len);
550 			m = m->m_next;
551 		} else {
552 			sbfree(&so->so_rcv, m);
553 			if (paddr) {
554 				*paddr = m;
555 				so->so_rcv.sb_mb = m->m_next;
556 				m->m_next = 0;
557 				m = so->so_rcv.sb_mb;
558 			} else {
559 				MFREE(m, so->so_rcv.sb_mb);
560 				m = so->so_rcv.sb_mb;
561 			}
562 		}
563 	}
564 	while (m && m->m_type == MT_CONTROL && error == 0) {
565 		if (flags & MSG_PEEK) {
566 			if (controlp)
567 				*controlp = m_copy(m, 0, m->m_len);
568 			m = m->m_next;
569 		} else {
570 			sbfree(&so->so_rcv, m);
571 			if (controlp) {
572 				if (pr->pr_domain->dom_externalize &&
573 				    mtod(m, struct cmsghdr *)->cmsg_type ==
574 				    SCM_RIGHTS)
575 				   error = (*pr->pr_domain->dom_externalize)(m);
576 				*controlp = m;
577 				so->so_rcv.sb_mb = m->m_next;
578 				m->m_next = 0;
579 				m = so->so_rcv.sb_mb;
580 			} else {
581 				MFREE(m, so->so_rcv.sb_mb);
582 				m = so->so_rcv.sb_mb;
583 			}
584 		}
585 		if (controlp)
586 			controlp = &(*controlp)->m_next;
587 	}
588 	if (m) {
589 		m->m_nextpkt = nextrecord;
590 		type = m->m_type;
591 	}
592 	moff = 0;
593 	offset = 0;
594 	while (m && m->m_type == type && uio->uio_resid > 0 && error == 0) {
595 		if (m->m_type == MT_OOBDATA)
596 			flags |= MSG_OOB;
597 #ifdef DIAGNOSTIC
598 		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
599 			panic("receive 3");
600 #endif
601 		type = m->m_type;
602 		so->so_state &= ~SS_RCVATMARK;
603 		len = uio->uio_resid;
604 		if (so->so_oobmark && len > so->so_oobmark - offset)
605 			len = so->so_oobmark - offset;
606 		if (len > m->m_len - moff)
607 			len = m->m_len - moff;
608 		/*
609 		 * If mp is set, just pass back the mbufs.
610 		 * Otherwise copy them out via the uio, then free.
611 		 * Sockbuf must be consistent here (points to current mbuf,
612 		 * it points to next record) when we drop priority;
613 		 * we must note any additions to the sockbuf when we
614 		 * block interrupts again.
615 		 */
616 		if (mp == 0) {
617 			splx(s);
618 			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
619 			s = splnet();
620 		} else
621 			uio->uio_resid -= len;
622 		if (len == m->m_len - moff) {
623 			if (m->m_flags & M_EOR)
624 				flags |= MSG_EOR;
625 			if (flags & MSG_PEEK) {
626 				m = m->m_next;
627 				moff = 0;
628 			} else {
629 				nextrecord = m->m_nextpkt;
630 				sbfree(&so->so_rcv, m);
631 				if (mp) {
632 					*mp = m;
633 					mp = &m->m_next;
634 					so->so_rcv.sb_mb = m = m->m_next;
635 					*mp = (struct mbuf *)0;
636 				} else {
637 					MFREE(m, so->so_rcv.sb_mb);
638 					m = so->so_rcv.sb_mb;
639 				}
640 				if (m)
641 					m->m_nextpkt = nextrecord;
642 			}
643 		} else {
644 			if (flags & MSG_PEEK)
645 				moff += len;
646 			else {
647 				if (mp)
648 					*mp = m_copym(m, 0, len, M_WAIT);
649 				m->m_data += len;
650 				m->m_len -= len;
651 				so->so_rcv.sb_cc -= len;
652 			}
653 		}
654 		if (so->so_oobmark) {
655 			if ((flags & MSG_PEEK) == 0) {
656 				so->so_oobmark -= len;
657 				if (so->so_oobmark == 0) {
658 					so->so_state |= SS_RCVATMARK;
659 					break;
660 				}
661 			} else
662 				offset += len;
663 		}
664 		if (flags & MSG_EOR)
665 			break;
666 		/*
667 		 * If the MSG_WAITALL flag is set (for non-atomic socket),
668 		 * we must not quit until "uio->uio_resid == 0" or an error
669 		 * termination.  If a signal/timeout occurs, return
670 		 * with a short count but without error.
671 		 * Keep sockbuf locked against other readers.
672 		 */
673 		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
674 		    !sosendallatonce(so)) {
675 			error = sbwait(&so->so_rcv);
676 			if (error) {
677 				sbunlock(&so->so_rcv);
678 				splx(s);
679 				return (0);
680 			}
681 			if (m = so->so_rcv.sb_mb)
682 				nextrecord = m->m_nextpkt;
683 			if (so->so_error || so->so_state & SS_CANTRCVMORE)
684 				break;
685 			continue;
686 		}
687 	}
688 	if ((flags & MSG_PEEK) == 0) {
689 		if (m == 0)
690 			so->so_rcv.sb_mb = nextrecord;
691 		else if (pr->pr_flags & PR_ATOMIC) {
692 			flags |= MSG_TRUNC;
693 			(void) sbdroprecord(&so->so_rcv);
694 		}
695 		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
696 			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
697 			    (struct mbuf *)flags, (struct mbuf *)0,
698 			    (struct mbuf *)0);
699 	}
700 	if (flagsp)
701 		*flagsp |= flags;
702 release:
703 	sbunlock(&so->so_rcv);
704 	splx(s);
705 	return (error);
706 }
707 
708 soshutdown(so, how)
709 	register struct socket *so;
710 	register int how;
711 {
712 	register struct protosw *pr = so->so_proto;
713 
714 	how++;
715 	if (how & FREAD)
716 		sorflush(so);
717 	if (how & FWRITE)
718 		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
719 		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
720 	return (0);
721 }
722 
/*
 * Flush and tear down a socket's receive buffer.
 * Marks the socket unable to receive, empties the sockbuf while at
 * high priority so interrupt-level code sees it empty, then disposes
 * of its contents (including any passed access rights) at low priority.
 */
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	/* SB_NOINTR keeps the lock acquisition from being interrupted */
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	/* copy aside, then zero the live sockbuf before dropping priority */
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	/* let the domain dispose of any in-transit rights (e.g. fds) */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
743 
/*
 * Set a socket option.  Non-SOL_SOCKET levels are passed to the
 * protocol's ctloutput routine (which consumes m0); socket-level
 * options are handled here.  The option mbuf is always freed on
 * the paths below.  Returns 0 or an errno.
 */
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		/* boolean options: set or clear the option bit itself */
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		/* integer-valued buffer/watermark/timeout options */
		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
832 
/*
 * Get a socket option.  Non-SOL_SOCKET levels are passed to the
 * protocol's ctloutput routine; socket-level options are answered
 * here in a freshly allocated mbuf returned via *mp (caller frees).
 * Returns 0 or ENOPROTOOPT.
 */
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		/* most answers are a single int; SO_LINGER overrides m_len */
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		/* boolean options: report the option bit itself */
		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			/* reading the pending error also clears it */
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
910 
911 sohasoutofband(so)
912 	register struct socket *so;
913 {
914 	struct proc *p;
915 
916 	if (so->so_pgid < 0)
917 		gsignal(-so->so_pgid, SIGURG);
918 	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
919 		psignal(p, SIGURG);
920 	if (so->so_rcv.sb_sel) {
921 		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
922 		so->so_rcv.sb_sel = 0;
923 		so->so_rcv.sb_flags &= ~SB_COLL;
924 	}
925 }
926