/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_socket.c	6.26 (Berkeley) 04/19/86
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "inode.h"
#include "buf.h"
#include "mbuf.h"
#include "un.h"
#include "domain.h"
#include "protosw.h"
#include "socket.h"
#include "socketvar.h"
#include "stat.h"
#include "ioctl.h"
#include "uio.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../net/if.h"

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 *
 * TODO:
 *	test socketpair
 *	clean up async
 *	out-of-band is a kludge
 */
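/*
 * Create a socket of the requested type in the given domain.
 * Find the protocol switch entry (by protocol number if one was
 * given, otherwise by type), allocate the socket structure in an
 * mbuf, and let the protocol attach itself with PRU_ATTACH.
 */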
/*ARGSUSED*/
socreate(dom, aso, type, proto)
	struct socket **aso;
	register int type;
	int proto;
{
	register struct protosw *prp;
	register struct socket *so;
	register struct mbuf *m;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	m = m_getclr(M_WAIT, MT_SOCKET);
	so = mtod(m, struct socket *);
	so->so_options = 0;
	so->so_state = 0;
	so->so_type = type;
	if (u.u_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

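/*
 * Bind a name to a socket; the work is done entirely by the
 * protocol's PRU_BIND request.
 */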
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

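/*
 * Mark a socket as willing to accept connections: issue PRU_LISTEN
 * to the protocol, initialize the connection queues, set
 * SO_ACCEPTCONN, and clamp the backlog to SOMAXCONN.
 */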
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_q == 0) {
		so->so_q = so;
		so->so_q0 = so;
		so->so_options |= SO_ACCEPTCONN;
	}
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = MIN(backlog, SOMAXCONN);
	splx(s);
	return (0);
}

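/*
 * Release a socket: remove it from its accept queue (if it is on
 * one) and, once the protocol control block is gone and the last
 * file reference has been dropped, free the buffers and the
 * socket mbuf itself.
 */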
sofree(so)
	register struct socket *so;
{

	if (so->so_head) {
		if (!soqremque(so, 0) && !soqremque(so, 1))
			panic("sofree dq");
		so->so_head = 0;
	}
	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	sbrelease(&so->so_snd);
	sorflush(so);
	(void) m_free(dtom(so));
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		while (so->so_q0 != so)
			(void) soabort(so->so_q0);
		while (so->so_q != so)
			(void) soabort(so->so_q);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED)
				sleep((caddr_t)&so->so_timeo, PZERO+1);
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

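/*
 * Accept a queued connection: mark the socket as having a file
 * descriptor reference again and let the protocol's PRU_ACCEPT
 * request return the name of the connected peer.
 */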
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

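/*
 * Initiate a connection to the address in nam with PRU_CONNECT.
 */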
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

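/*
 * Connect two sockets to each other (as for socketpair) using the
 * protocol's PRU_CONNECT2 request.
 */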
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

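/*
 * Disconnect a connected socket with PRU_DISCONNECT; fails if the
 * socket is not connected or is already disconnecting.
 */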
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 */
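/*
 * In outline, the loop below locks the send buffer, waits (or
 * returns EWOULDBLOCK) until enough buffer space is available,
 * copies user data into a chain of mbufs (using clusters for
 * large transfers), and hands the chain to the protocol with
 * PRU_SEND or PRU_SENDOOB, repeating until the uio is drained
 * or an error occurs.
 */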
sosend(so, nam, uio, flags, rights)
	register struct socket *so;
	struct mbuf *nam;
	register struct uio *uio;
	int flags;
	struct mbuf *rights;
{
	struct mbuf *top = 0;
	register struct mbuf *m, **mp;
	register int space;
	int len, rlen = 0, error = 0, s, dontroute, first = 1;

	if (sosendallatonce(so) && uio->uio_resid > so->so_snd.sb_hiwat)
		return (EMSGSIZE);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	u.u_ru.ru_msgsnd++;
	if (rights)
		rlen = rights->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	sblock(&so->so_snd);
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;			/* ??? */
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				snderr(ENOTCONN);
			if (nam == 0)
				snderr(EDESTADDRREQ);
		}
		if (flags & MSG_OOB)
			space = 1024;
		else {
			space = sbspace(&so->so_snd);
			if (space <= rlen ||
			   (sosendallatonce(so) &&
				space < uio->uio_resid + rlen) ||
			   (uio->uio_resid >= CLBYTES && space < CLBYTES &&
			   so->so_snd.sb_cc >= CLBYTES &&
			   (so->so_state & SS_NBIO) == 0)) {
				if (so->so_state & SS_NBIO) {
					if (first)
						error = EWOULDBLOCK;
					splx(s);
					goto release;
				}
				sbunlock(&so->so_snd);
				sbwait(&so->so_snd);
				splx(s);
				goto restart;
			}
		}
		splx(s);
		mp = &top;
		space -= rlen;
		while (space > 0) {
			MGET(m, M_WAIT, MT_DATA);
			if (uio->uio_resid >= CLBYTES / 2 && space >= CLBYTES) {
				MCLGET(m);
				if (m->m_len != CLBYTES)
					goto nopages;
				len = MIN(CLBYTES, uio->uio_resid);
				space -= CLBYTES;
			} else {
nopages:
				len = MIN(MIN(MLEN, uio->uio_resid), space);
				space -= len;
			}
			error = uiomove(mtod(m, caddr_t), len, UIO_WRITE, uio);
			m->m_len = len;
			*mp = m;
			if (error)
				goto release;
			mp = &m->m_next;
			if (uio->uio_resid <= 0)
				break;
		}
		if (dontroute)
			so->so_options |= SO_DONTROUTE;
		s = splnet();					/* XXX */
		error = (*so->so_proto->pr_usrreq)(so,
		    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
		    top, (caddr_t)nam, rights);
		splx(s);
		if (dontroute)
			so->so_options &= ~SO_DONTROUTE;
		rights = 0;
		rlen = 0;
		top = 0;
		first = 0;
		if (error)
			break;
	} while (uio->uio_resid);

release:
	sbunlock(&so->so_snd);
	if (top)
		m_freem(top);
	if (error == EPIPE)
		psignal(u.u_procp, SIGPIPE);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf containing access rights if supported
 * by the protocol, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 */
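/*
 * For illustration, a record in so_rcv for a PR_ADDR protocol
 * looks roughly like
 *
 *	MT_SONAME [-> MT_RIGHTS] -> MT_DATA -> MT_DATA ...
 *
 * with the mbufs of one record linked through m_next and
 * successive records linked through m_act.
 */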
soreceive(so, aname, uio, flags, rightsp)
	register struct socket *so;
	struct mbuf **aname;
	register struct uio *uio;
	int flags;
	struct mbuf **rightsp;
{
	register struct mbuf *m;
	register int len, error = 0, s, tomark;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff;

	if (rightsp)
		*rightsp = 0;
	if (aname)
		*aname = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			len = uio->uio_resid;
			if (len > m->m_len)
				len = m->m_len;
			error =
			    uiomove(mtod(m, caddr_t), (int)len, UIO_READ, uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}

restart:
	sblock(&so->so_rcv);
	s = splnet();

#define	rcverr(errno)	{ error = errno; splx(s); goto release; }
	if (so->so_rcv.sb_cc == 0) {
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED))
			rcverr(ENOTCONN);
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_state & SS_NBIO)
			rcverr(EWOULDBLOCK);
		sbunlock(&so->so_rcv);
		sbwait(&so->so_rcv);
		splx(s);
		goto restart;
	}
	u.u_ru.ru_msgrcv++;
	m = so->so_rcv.sb_mb;
	if (m == 0)
		panic("receive 1");
	nextrecord = m->m_act;
	if (pr->pr_flags & PR_ADDR) {
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
		if (flags & MSG_PEEK) {
			if (aname)
				*aname = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (aname) {
				*aname = m;
				m = m->m_next;
				(*aname)->m_next = 0;
				so->so_rcv.sb_mb = m;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;
		}
	}
	if (m && m->m_type == MT_RIGHTS) {
		if ((pr->pr_flags & PR_RIGHTS) == 0)
			panic("receive 2");
		if (flags & MSG_PEEK) {
			if (rightsp)
				*rightsp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (rightsp) {
				*rightsp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			if (m)
				m->m_act = nextrecord;
		}
	}
	moff = 0;
	tomark = so->so_oobmark;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
		len = uio->uio_resid;
		so->so_state &= ~SS_RCVATMARK;
		if (tomark && len > tomark)
			len = tomark;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		splx(s);
		error =
		    uiomove(mtod(m, caddr_t) + moff, (int)len, UIO_READ, uio);
		s = splnet();
		if (len == m->m_len - moff) {
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_act;
				sbfree(&so->so_rcv, m);
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
				if (m)
					m->m_act = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				m->m_off += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if ((flags & MSG_PEEK) == 0 && so->so_oobmark) {
			so->so_oobmark -= len;
			if (so->so_oobmark == 0) {
				so->so_state |= SS_RCVATMARK;
				break;
			}
		}
		if (tomark) {
			tomark -= len;
			if (tomark == 0)
				break;
		}
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		else if (pr->pr_flags & PR_ATOMIC)
			(void) sbdroprecord(&so->so_rcv);
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0 && rightsp && *rightsp &&
		    pr->pr_domain->dom_externalize)
			error = (*pr->pr_domain->dom_externalize)(*rightsp);
	}
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

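/*
 * Shut down part of a full-duplex connection.  The how argument
 * follows shutdown(2): incrementing it yields FREAD and/or FWRITE
 * bits; FREAD flushes and disables the receive side, FWRITE passes
 * PRU_SHUTDOWN to the protocol to disallow further sends.
 */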
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

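/*
 * Flush the receive buffer: mark the socket unable to receive more
 * data, detach the current contents at splimp, let the domain
 * dispose of any access rights held in the buffer, and release
 * the mbufs.
 */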
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sblock(sb);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}

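/*
 * Set a socket option.  Options at a level other than SOL_SOCKET
 * are passed through to the protocol's ctloutput routine; the
 * socket-level options are interpreted here, and the option mbuf
 * is freed before returning.
 */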
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ? &so->so_snd :
				    &so->so_rcv, *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = *mtod(m, int *);
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = *mtod(m, int *);
				break;
			}
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}

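/*
 * Get the current value of a socket option.  Options at a level
 * other than SOL_SOCKET are passed through to the protocol's
 * ctloutput routine; socket-level values are returned to the
 * caller in a newly allocated mbuf.
 */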
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
			*mtod(m, int *) = so->so_snd.sb_timeo;
			break;

		case SO_RCVTIMEO:
			*mtod(m, int *) = so->so_rcv.sb_timeo;
			break;

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}

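/*
 * Notify the owner of a socket that out-of-band data has arrived:
 * deliver SIGURG to the owning process or process group and wake
 * up any process selecting on the socket for reading.
 */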
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

	if (so->so_pgrp < 0)
		gsignal(-so->so_pgrp, SIGURG);
	else if (so->so_pgrp > 0 && (p = pfind(so->so_pgrp)) != 0)
		psignal(p, SIGURG);
	if (so->so_rcv.sb_sel) {
		selwakeup(so->so_rcv.sb_sel, so->so_rcv.sb_flags & SB_COLL);
		so->so_rcv.sb_sel = 0;
		so->so_rcv.sb_flags &= ~SB_COLL;
	}
}
803