/*	$OpenBSD: uipc_socket2.c,v 1.154 2024/05/07 15:54:23 claudio Exp $	*/
/*	$NetBSD: uipc_socket2.c,v 1.11 1996/02/04 02:17:55 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/pool.h>

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long	sb_max = SB_MAX;		/* patchable */

extern struct pool mclpools[];
extern struct pool mbpool;
/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  The normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of a connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of a disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */
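
/*
 * Illustrative sketch only, not compiled code: the passive-open
 * sequence as a handshaking protocol such as TCP would drive it.
 *
 *	so = sonewconn(head, 0, M_DONTWAIT);	new socket on head->so_q0
 *	...handshake with the peer completes...
 *	soisconnected(so);			moved to head->so_q, where
 *						accept() can now find it
 */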

void
soisconnecting(struct socket *so)
{
	soassertlocked(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

void
soisconnected(struct socket *so)
{
	struct socket *head = so->so_head;

	soassertlocked(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTED;

	if (head != NULL && so->so_onq == &head->so_q0) {
		int persocket = solock_persocket(so);

		if (persocket) {
			soref(so);
			soref(head);

			sounlock(so);
			solock(head);
			solock(so);

			if (so->so_onq != &head->so_q0) {
				sounlock(head);
				sorele(head);
				sorele(so);

				return;
			}

			sorele(head);
			sorele(so);
		}

		soqremque(so, 0);
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup_one(&head->so_timeo);

		if (persocket)
			sounlock(head);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

void
soisdisconnecting(struct socket *so)
{
	soassertlocked(so);
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= SS_ISDISCONNECTING;

	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_state |= SS_CANTRCVMORE;
	mtx_leave(&so->so_rcv.sb_mtx);

	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_state |= SS_CANTSENDMORE;
	mtx_leave(&so->so_snd.sb_mtx);

	wakeup(&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	soassertlocked(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISDISCONNECTED;

	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_state |= SS_CANTRCVMORE;
	mtx_leave(&so->so_rcv.sb_mtx);

	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_state |= SS_CANTSENDMORE;
	mtx_leave(&so->so_snd.sb_mtx);

	wakeup(&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn() is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0 or SS_ISCONNECTED.
 */
struct socket *
sonewconn(struct socket *head, int connstatus, int wait)
{
	struct socket *so;
	int persocket = solock_persocket(head);
	int soqueue = connstatus ? 1 : 0;

	/*
	 * XXXSMP as long as `so' and `head' share the same lock, we
	 * can call soreserve() and pr_attach() below w/o explicitly
	 * locking `so'.
	 */
	soassertlocked(head);

	if (m_pool_used() > 95)
		return (NULL);
	if (head->so_qlen + head->so_q0len > head->so_qlimit * 3)
		return (NULL);
	so = soalloc(head->so_proto, wait);
	if (so == NULL)
		return (NULL);
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_euid = head->so_euid;
	so->so_ruid = head->so_ruid;
	so->so_egid = head->so_egid;
	so->so_rgid = head->so_rgid;
	so->so_cpid = head->so_cpid;

	/*
	 * Lock order will be `head' -> `so' while these sockets are linked.
	 */
	if (persocket)
		solock(so);

	/*
	 * Inherit watermarks but those may get clamped in low mem situations.
	 */
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat))
		goto fail;

	mtx_enter(&head->so_snd.sb_mtx);
	so->so_snd.sb_wat = head->so_snd.sb_wat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_snd.sb_timeo_nsecs = head->so_snd.sb_timeo_nsecs;
	mtx_leave(&head->so_snd.sb_mtx);

	mtx_enter(&head->so_rcv.sb_mtx);
	so->so_rcv.sb_wat = head->so_rcv.sb_wat;
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_rcv.sb_timeo_nsecs = head->so_rcv.sb_timeo_nsecs;
	mtx_leave(&head->so_rcv.sb_mtx);

	sigio_copy(&so->so_sigio, &head->so_sigio);

	soqinsque(head, so, soqueue);
	if (pru_attach(so, 0, wait) != 0) {
		soqremque(so, soqueue);
		goto fail;
	}
	if (connstatus) {
		so->so_state |= connstatus;
		sorwakeup(head);
		wakeup(&head->so_timeo);
	}

	if (persocket)
		sounlock(so);

	return (so);

fail:
	if (persocket)
		sounlock(so);
	sigio_free(&so->so_sigio);
	klist_free(&so->so_rcv.sb_klist);
	klist_free(&so->so_snd.sb_klist);
	pool_put(&socket_pool, so);

	return (NULL);
}
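
/*
 * Sketch, not compiled code: a protocol that needs no handshake can
 * pass SS_ISCONNECTED as connstatus so the new socket lands directly
 * on so_q, ready for accept(); a handshaking protocol passes 0 and
 * calls soisconnected() once the connection is established.
 *
 *	so = sonewconn(head, SS_ISCONNECTED, M_DONTWAIT);
 *	if (so == NULL)
 *		goto drop;	(resource shortage or queue overflow)
 */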

void
soqinsque(struct socket *head, struct socket *so, int q)
{
	soassertlocked(head);
	soassertlocked(so);

	KASSERT(so->so_onq == NULL);

	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_onq = &head->so_q0;
	} else {
		head->so_qlen++;
		so->so_onq = &head->so_q;
	}
	TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
}

int
soqremque(struct socket *so, int q)
{
	struct socket *head = so->so_head;

	soassertlocked(so);
	soassertlocked(head);

	if (q == 0) {
		if (so->so_onq != &head->so_q0)
			return (0);
		head->so_q0len--;
	} else {
		if (so->so_onq != &head->so_q)
			return (0);
		head->so_qlen--;
	}
	TAILQ_REMOVE(so->so_onq, so, so_qe);
	so->so_onq = NULL;
	so->so_head = NULL;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it is normally applied to a socket by the protocol code
 * when the user informs the system that no more data is to be sent
 * (the PRU_SHUTDOWN case).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data,
 * e.g. on receipt of a TCP FIN.  Data queued for reading in the
 * socket may yet be read.
 */

void
socantsendmore(struct socket *so)
{
	soassertlocked(so);
	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_state |= SS_CANTSENDMORE;
	mtx_leave(&so->so_snd.sb_mtx);
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
	if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
		soassertlocked(so);

	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_state |= SS_CANTRCVMORE;
	mtx_leave(&so->so_rcv.sb_mtx);
	sorwakeup(so);
}

void
solock(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_LOCK();
		break;
	default:
		rw_enter_write(&so->so_lock);
		break;
	}
}

void
solock_shared(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		if (so->so_proto->pr_usrreqs->pru_lock != NULL) {
			NET_LOCK_SHARED();
			rw_enter_write(&so->so_lock);
		} else
			NET_LOCK();
		break;
	default:
		rw_enter_write(&so->so_lock);
		break;
	}
}

int
solock_persocket(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		return 0;
	default:
		return 1;
	}
}

void
solock_pair(struct socket *so1, struct socket *so2)
{
	KASSERT(so1 != so2);
	KASSERT(so1->so_type == so2->so_type);
	KASSERT(solock_persocket(so1));

	if (so1 < so2) {
		solock(so1);
		solock(so2);
	} else {
		solock(so2);
		solock(so1);
	}
}

void
sounlock(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_UNLOCK();
		break;
	default:
		rw_exit_write(&so->so_lock);
		break;
	}
}

void
sounlock_shared(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		if (so->so_proto->pr_usrreqs->pru_unlock != NULL) {
			rw_exit_write(&so->so_lock);
			NET_UNLOCK_SHARED();
		} else
			NET_UNLOCK();
		break;
	default:
		rw_exit_write(&so->so_lock);
		break;
	}
}

void
soassertlocked_readonly(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_ASSERT_LOCKED();
		break;
	default:
		rw_assert_wrlock(&so->so_lock);
		break;
	}
}

void
soassertlocked(struct socket *so)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		if (rw_status(&netlock) == RW_READ) {
			NET_ASSERT_LOCKED();

			if (splassert_ctl > 0 && pru_locked(so) == 0 &&
			    rw_status(&so->so_lock) != RW_WRITE)
				splassert_fail(0, RW_WRITE, __func__);
		} else
			NET_ASSERT_LOCKED_EXCLUSIVE();
		break;
	default:
		rw_assert_wrlock(&so->so_lock);
		break;
	}
}

int
sosleep_nsec(struct socket *so, void *ident, int prio, const char *wmesg,
    uint64_t nsecs)
{
	int ret;

	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		if (so->so_proto->pr_usrreqs->pru_unlock != NULL &&
		    rw_status(&netlock) == RW_READ) {
			rw_exit_write(&so->so_lock);
		}
		ret = rwsleep_nsec(ident, &netlock, prio, wmesg, nsecs);
		if (so->so_proto->pr_usrreqs->pru_lock != NULL &&
		    rw_status(&netlock) == RW_READ) {
			rw_enter_write(&so->so_lock);
		}
		break;
	default:
		ret = rwsleep_nsec(ident, &so->so_lock, prio, wmesg, nsecs);
		break;
	}

	return ret;
}

void
sbmtxassertlocked(struct socket *so, struct sockbuf *sb)
{
	if (sb->sb_flags & SB_MTXLOCK) {
		if (splassert_ctl > 0 && mtx_owned(&sb->sb_mtx) == 0)
			splassert_fail(0, RW_WRITE, __func__);
	} else
		soassertlocked(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait_locked(struct socket *so, struct sockbuf *sb)
{
	int prio = (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH;

	MUTEX_ASSERT_LOCKED(&sb->sb_mtx);

	sb->sb_flags |= SB_WAIT;
	return msleep_nsec(&sb->sb_cc, &sb->sb_mtx, prio, "sbwait",
	    sb->sb_timeo_nsecs);
}

int
sbwait(struct socket *so, struct sockbuf *sb)
{
	uint64_t timeo_nsecs;
	int prio = (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH;

	soassertlocked(so);

	mtx_enter(&sb->sb_mtx);
	timeo_nsecs = sb->sb_timeo_nsecs;
	sb->sb_flags |= SB_WAIT;
	mtx_leave(&sb->sb_mtx);

	return sosleep_nsec(so, &sb->sb_cc, prio, "netio", timeo_nsecs);
}

int
sblock(struct socket *so, struct sockbuf *sb, int flags)
{
	int error = 0, prio = PSOCK;

	if (sb->sb_flags & SB_MTXLOCK) {
		int rwflags = RW_WRITE;

		if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
			rwflags |= RW_INTR;
		if (!(flags & SBL_WAIT))
			rwflags |= RW_NOSLEEP;

		error = rw_enter(&sb->sb_lock, rwflags);
		if (error == EBUSY)
			error = EWOULDBLOCK;
		return error;
	}

	soassertlocked(so);

	mtx_enter(&sb->sb_mtx);
	if ((sb->sb_flags & SB_LOCK) == 0) {
		sb->sb_flags |= SB_LOCK;
		goto out;
	}
	if ((flags & SBL_WAIT) == 0) {
		error = EWOULDBLOCK;
		goto out;
	}
	if (!(flags & SBL_NOINTR || sb->sb_flags & SB_NOINTR))
		prio |= PCATCH;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		mtx_leave(&sb->sb_mtx);
		error = sosleep_nsec(so, &sb->sb_flags, prio, "sblock", INFSLP);
		if (error)
			return (error);
		mtx_enter(&sb->sb_mtx);
	}
	sb->sb_flags |= SB_LOCK;
out:
	mtx_leave(&sb->sb_mtx);

	return (error);
}

void
sbunlock_locked(struct socket *so, struct sockbuf *sb)
{
	if (sb->sb_flags & SB_MTXLOCK) {
		rw_exit(&sb->sb_lock);
		return;
	}

	MUTEX_ASSERT_LOCKED(&sb->sb_mtx);

	sb->sb_flags &= ~SB_LOCK;
	if (sb->sb_flags & SB_WANT) {
		sb->sb_flags &= ~SB_WANT;
		wakeup(&sb->sb_flags);
	}
}

void
sbunlock(struct socket *so, struct sockbuf *sb)
{
	if (sb->sb_flags & SB_MTXLOCK) {
		rw_exit(&sb->sb_lock);
		return;
	}

	mtx_enter(&sb->sb_mtx);
	sbunlock_locked(so, sb);
	mtx_leave(&sb->sb_mtx);
}
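
/*
 * Sketch, not compiled code: the usual bracket around a socket buffer
 * operation, as soreceive()-style code would use it.
 *
 *	error = sblock(so, &so->so_rcv, SBL_WAIT);
 *	if (error)
 *		return (error);
 *	...manipulate so->so_rcv...
 *	sbunlock(so, &so->so_rcv);
 */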

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket buffer has the SB_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int dowakeup = 0, dopgsigio = 0;

	mtx_enter(&sb->sb_mtx);
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		dowakeup = 1;
	}
	if (sb->sb_flags & SB_ASYNC)
		dopgsigio = 1;

	knote_locked(&sb->sb_klist, 0);
	mtx_leave(&sb->sb_mtx);

	if (dowakeup)
		wakeup(&sb->sb_cc);

	if (dopgsigio)
		pgsigio(&so->so_sigio, SIGIO, 0);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_CONTROL).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */
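
/*
 * Illustrative layout, following the conventions above: a receive
 * buffer holding two datagram records, the first with sender name,
 * control data and data, the second with name and data only.
 *
 *	sb_mb -> [MT_SONAME] -m_next-> [MT_CONTROL] -m_next-> [MT_DATA]
 *	    |
 *	    | m_nextpkt
 *	    v
 *	[MT_SONAME] -m_next-> [MT_DATA]		(== sb_lastrecord)
 */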

int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	soassertlocked(so);

	mtx_enter(&so->so_rcv.sb_mtx);
	mtx_enter(&so->so_snd.sb_mtx);
	if (sbreserve(so, &so->so_snd, sndcc))
		goto bad;
	so->so_snd.sb_wat = sndcc;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	if (sbreserve(so, &so->so_rcv, rcvcc))
		goto bad2;
	so->so_rcv.sb_wat = rcvcc;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	mtx_leave(&so->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);

	return (0);
bad2:
	sbrelease(so, &so->so_snd);
bad:
	mtx_leave(&so->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);
	return (ENOBUFS);
}

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 */
int
sbreserve(struct socket *so, struct sockbuf *sb, u_long cc)
{
	sbmtxassertlocked(so, sb);

	if (cc == 0 || cc > sb_max)
		return (1);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = max(3 * MAXMCLBYTES, cc * 8);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (0);
}
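
/*
 * Worked example, assuming MAXMCLBYTES is 64k as in the stock
 * configuration: reserving cc = 16384 bytes sets sb_hiwat = 16384 and
 * sb_mbmax = max(3 * 65536, 16384 * 8) = 196608, keeping the mbuf
 * accounting limit well above the data high-water mark even when
 * buffering efficiency is poor.
 */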

/*
 * In a low-memory situation, do not accept any request larger
 * than the default.
 */
int
sbcheckreserve(u_long cnt, u_long defcnt)
{
	if (cnt > defcnt && sbchecklowmem())
		return (ENOBUFS);
	return (0);
}

int
sbchecklowmem(void)
{
	static int sblowmem;
	unsigned int used = m_pool_used();

	/*
	 * Hysteresis: enter the low-memory regime above 80% mbuf pool
	 * usage, and leave it only once usage drops below 60%.
	 */
	if (used < 60)
		sblowmem = 0;
	else if (used > 80)
		sblowmem = 1;

	return (sblowmem);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease(struct socket *so, struct sockbuf *sb)
{
	sbflush(so, sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available by comparing the result of sbspace() with the
 * amount of data to be added.  sbappendrecord() differs from sbappend()
 * in that the data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendrights()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer in a protocol with m_copym for output to a peer,
 * and then removed from the socket buffer with sbdrop()
 * or sbdroprecord() when the data is acknowledged by the peer.
 */
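
/*
 * Sketch, not compiled code: how a datagram protocol's input path
 * would typically deliver a packet with sbappendaddr(), which performs
 * its own space check (srcaddr and opts stand in for the protocol's
 * own source address and control mbufs).
 *
 *	if (sbappendaddr(so, &so->so_rcv, srcaddr, m, opts) == 0) {
 *		m_freem(m);	(no space or no mbufs: drop the packet)
 *		m_freem(opts);
 *	} else
 *		sorwakeup(so);
 */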

#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
		    sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("sblastrecordchk from %s", where);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
		    sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("sblastmbufchk from %s", where);
	}
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0)						\
do {									\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with the
 * mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(struct socket *so, struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;

	sbmtxassertlocked(so, sb);
	SBLASTRECORDCHK(sb, "sbappend 1");

	if ((n = sb->sb_lastrecord) != NULL) {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(so, sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * If this is the first record in the socket buffer, it's
		 * also the last record.
		 */
		sb->sb_lastrecord = m;
	}
	sbcompress(so, sb, m, n);
	SBLASTRECORDCHK(sb, "sbappend 2");
}

/*
 * This version of sbappend() should only be used when the caller
 * absolutely knows that there will never be more than one record
 * in the socket buffer, that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct socket *so, struct sockbuf *sb, struct mbuf *m)
{
	KASSERT(sb == &so->so_rcv || sb == &so->so_snd);
	soassertlocked(so);
	KDASSERT(m->m_nextpkt == NULL);
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

	sbcompress(so, sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct socket *so, struct sockbuf *sb)
{
	struct mbuf *m, *n;
	u_long len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_nextpkt) {
		for (n = m; n; n = n->m_next) {
			len += n->m_len;
			mbcnt += MSIZE;
			if (n->m_flags & M_EXT)
				mbcnt += n->m_ext.ext_size;
			if (m != n && n->m_nextpkt)
				panic("sbcheck nextpkt");
		}
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif

/*
 * As above, except the mbuf chain
 * begins a new record.
 */
void
sbappendrecord(struct socket *so, struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	sbmtxassertlocked(so, sb);

	if (m0 == NULL)
		return;

	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(so, sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 1");
	SBLINKRECORD(sb, m0);
	m = m0->m_next;
	m0->m_next = NULL;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(so, sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct socket *so, struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space = asa->sa_len;

	sbmtxassertlocked(so, sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == NULL)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(so, sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	memcpy(mtod(m, caddr_t), asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(so, sb, n);
	sballoc(so, sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");

	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}

int
sbappendcontrol(struct socket *so, struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *mlast, *n;
	int eor = 0, space = 0;

	sbmtxassertlocked(so, sb);

	if (control == NULL)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next) {
		space += m->m_len;
		eor |= m->m_flags & M_EOR;
		if (eor) {
			if (m->m_next == NULL)
				m->m_flags |= M_EOR;
			else
				m->m_flags &= ~M_EOR;
		}
	}
	if (space > sbspace(so, sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(so, sb, m);
	sballoc(so, sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb, "sbappendcontrol");

	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	return (1);
}

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
void
sbcompress(struct socket *so, struct sockbuf *sb, struct mbuf *m,
    struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		    (((o = m->m_next) || (o = n)) &&
		    o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    /* m_trailingspace() checks buffer writeability */
		    m->m_len <= ((n->m_flags & M_EXT)? n->m_ext.ext_size :
		    MCLBYTES) / 4 && /* XXX Don't copy too much */
		    m->m_len <= m_trailingspace(n) &&
		    n->m_type == m->m_type) {
			memcpy(mtod(n, caddr_t) + n->m_len, mtod(m, caddr_t),
			    m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			if (m->m_type != MT_CONTROL && m->m_type != MT_SONAME)
				sb->sb_datacc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(so, sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = NULL;
	}
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
	SBLASTMBUFCHK(sb, __func__);
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(struct socket *so, struct sockbuf *sb)
{
	KASSERT(sb == &so->so_rcv || sb == &so->so_snd);
	KASSERT((sb->sb_flags & SB_LOCK) == 0);

	while (sb->sb_mbcnt)
		sbdrop(so, sb, (int)sb->sb_cc);

	KASSERT(sb->sb_cc == 0);
	KASSERT(sb->sb_datacc == 0);
	KASSERT(sb->sb_mb == NULL);
	KASSERT(sb->sb_mbtail == NULL);
	KASSERT(sb->sb_lastrecord == NULL);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop(struct socket *so, struct sockbuf *sb, int len)
{
	struct mbuf *m, *mn;
	struct mbuf *next;

	sbmtxassertlocked(so, sb);

	next = (m = sb->sb_mb) ? m->m_nextpkt : NULL;
	while (len > 0) {
		if (m == NULL) {
			if (next == NULL)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			if (m->m_type != MT_CONTROL && m->m_type != MT_SONAME)
				sb->sb_datacc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(so, sb, m);
		mn = m_free(m);
		m = mn;
	}
	while (m && m->m_len == 0) {
		sbfree(so, sb, m);
		mn = m_free(m);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part
	 * makes sure sb_lastrecord is up-to-date if we dropped
	 * part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL)
		sb->sb_lastrecord = m;
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(struct socket *so, struct sockbuf *sb)
{
	struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(so, sb, m);
			mn = m_free(m);
		} while ((m = mn) != NULL);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(const void *p, size_t size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE(size) > MCLBYTES) {
		printf("sbcreatecontrol: message too large %zu\n", size);
		return (NULL);
	}

	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
		return (NULL);
	if (CMSG_SPACE(size) > MLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}
	cp = mtod(m, struct cmsghdr *);
	memset(cp, 0, CMSG_SPACE(size));
	memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}

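/*
 * Sketch, not compiled code: building a control mbuf that carries the
 * received TTL, roughly as the inet code does for IP_RECVTTL.
 *
 *	m = sbcreatecontrol(&ip->ip_ttl, sizeof(ip->ip_ttl),
 *	    IP_RECVTTL, IPPROTO_IP);
 *	if (m != NULL)
 *		...link m into the record's control chain...
 */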