/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long sb_max_adj =
    (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
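
/*
 * Worked example (illustrative; assumes the common defaults of
 * MSIZE == 256, MCLBYTES == 2048 and SB_MAX == 2MB): each cluster of
 * payload also costs one mbuf header, so a buffer charged sb_max bytes
 * of kernel memory can hold at most sb_max * MCLBYTES / (MSIZE +
 * MCLBYTES) bytes of user data.  With those defaults that is
 * 2MB * 2048 / 2304, i.e. roughly 8/9 of sb_max (about 1.78MB), which
 * is what sb_max_adj precomputes above.
 */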
static void	sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m,
    struct mbuf *n);
static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);

/*
 * Our own version of m_clrprotoflags() that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
	int mask;

	mask = ~M_PROTOFLAGS;
	if (flags & PRUS_NOTREADY)
		mask |= M_NOTREADY;
	while (m) {
		m->m_flags &= mask;
		m = m->m_next;
	}
}

/*
 * Compress M_NOTREADY mbufs after they have been readied by sbready().
 *
 * sbcompress() skips M_NOTREADY mbufs since the data is not available to
 * be copied at the time of sbcompress().  This function combines small
 * mbufs similar to sbcompress() once mbufs are ready.  'm0' is the first
 * mbuf sbready() marked ready, and 'end' is the first mbuf still not
 * ready.
 */
static void
sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
{
	struct mbuf *m, *n;
	int ext_size;

	SOCKBUF_LOCK_ASSERT(sb);

	if ((sb->sb_flags & SB_NOCOALESCE) != 0)
		return;

	for (m = m0; m != end; m = m->m_next) {
		MPASS((m->m_flags & M_NOTREADY) == 0);
		/*
		 * NB: In sbcompress(), 'n' is the last mbuf in the
		 * socket buffer and 'm' is the new mbuf being copied
		 * into the trailing space of 'n'.  Here, the roles
		 * are reversed and 'n' is the next mbuf after 'm'
		 * that is being copied into the trailing space of
		 * 'm'.
		 */
		n = m->m_next;
#ifdef KERN_TLS
		/* Try to coalesce adjacent ktls mbuf hdr/trailers. */
		if ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    (m->m_flags & M_EXTPG) &&
		    (n->m_flags & M_EXTPG) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n)) {
			int hdr_len, trail_len;

			hdr_len = n->m_epg_hdrlen;
			trail_len = m->m_epg_trllen;
			if (trail_len != 0 && hdr_len != 0 &&
			    trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
				/* copy n's header to m's trailer */
				memcpy(&m->m_epg_trail[trail_len],
				    n->m_epg_hdr, hdr_len);
				m->m_epg_trllen += hdr_len;
				m->m_len += hdr_len;
				n->m_epg_hdrlen = 0;
				n->m_len -= hdr_len;
			}
		}
#endif

		/* Compress small unmapped mbufs into plain mbufs. */
		if ((m->m_flags & M_EXTPG) && m->m_len <= MLEN &&
		    !mbuf_has_tls_session(m)) {
			ext_size = m->m_ext.ext_size;
			if (mb_unmapped_compress(m) == 0) {
				sb->sb_mbcnt -= ext_size;
				sb->sb_ccnt -= 1;
			}
		}

		while ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(m) &&
		    (m->m_flags & M_EXTPG) == 0 &&
		    !mbuf_has_tls_session(n) &&
		    !mbuf_has_tls_session(m) &&
		    n->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    n->m_len <= M_TRAILINGSPACE(m) &&
		    m->m_type == n->m_type) {
			KASSERT(sb->sb_lastrecord != n,
			    ("%s: merging start of record (%p) into previous mbuf (%p)",
			    __func__, n, m));
			m_copydata(n, 0, n->m_len, mtodo(m, m->m_len));
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m->m_flags |= n->m_flags & M_EOR;
			if (sb->sb_mbtail == n)
				sb->sb_mbtail = m;

			sb->sb_mbcnt -= MSIZE;
			sb->sb_mcnt -= 1;
			if (n->m_flags & M_EXT) {
				sb->sb_mbcnt -= n->m_ext.ext_size;
				sb->sb_ccnt -= 1;
			}
			m_free(n);
			n = m->m_next;
		}
	}
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);
}

/*
 * Mark ready "count" units of I/O starting with "m".  Most mbufs
 * count as a single unit of I/O except for M_EXTPG mbufs which
 * are backed by multiple pages.
 */
int
sbready(struct sockbuf *sb, struct mbuf *m0, int count)
{
	struct mbuf *m;
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
	KASSERT(count > 0, ("%s: invalid count %d", __func__, count));

	m = m0;
	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	while (count > 0) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		if ((m->m_flags & M_EXTPG) != 0) {
			if (count < m->m_epg_nrdy) {
				m->m_epg_nrdy -= count;
				count = 0;
				break;
			}
			count -= m->m_epg_nrdy;
			m->m_epg_nrdy = 0;
		} else
			count--;

		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
		m = m->m_next;
	}

	/*
	 * If the first mbuf is still not fully ready because only
	 * some of its backing pages were readied, no further progress
	 * can be made.
	 */
	if (m0 == m) {
		MPASS(m->m_flags & M_NOTREADY);
		return (EINPROGRESS);
	}

	if (!blocker) {
		sbready_compress(sb, m0, m);
		return (EINPROGRESS);
	}

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;
	sbready_compress(sb, m0, m);

	return (0);
}
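
/*
 * Illustrative sketch (hypothetical caller; not compiled): a protocol
 * whose completion handler readies previously queued M_NOTREADY data,
 * in the style of a pr_ready method.  example_output() stands in for
 * the protocol's output routine (e.g. tcp_output()).
 */
#if 0
static int
example_pr_ready(struct socket *so, struct mbuf *m, int count)
{
	struct sockbuf *sb = &so->so_snd;
	int error;

	SOCKBUF_LOCK(sb);
	error = sbready(sb, m, count);
	SOCKBUF_UNLOCK(sb);
	if (error == EINPROGRESS)
		return (0);	/* earlier data in the queue is still not ready */
	return (example_output(so));
}
#endif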
/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;
	sb->sb_mcnt += 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		sb->sb_ccnt += 1;
	}
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	sb->sb_mcnt -= 1;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		sb->sb_ccnt -= 1;
	}

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}
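
/*
 * Informal summary of the accounting invariants maintained above (see
 * also sbcheck()): sb_ccc counts every byte in the buffer, while
 * sb_acc counts only bytes the consumer may take, so sb_ccc == sb_acc
 * plus the bytes held in M_NOTREADY/M_BLOCKED mbufs.  sb_fnrdy points
 * at the first not-ready mbuf; ready mbufs queued behind it are marked
 * M_BLOCKED until sbready() unblocks them.  sb_mbcnt charges MSIZE per
 * mbuf plus ext_size for each M_EXT buffer.
 */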
#ifdef KERN_TLS
/*
 * Similar to sballoc/sbfree but does not adjust state associated with
 * the sb_mb chain such as sb_fnrdy or sb_sndptr*.  Also assumes mbufs
 * are not ready.
 */
void
sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;
	sb->sb_tlscc += m->m_len;

	sb->sb_mbcnt += MSIZE;
	sb->sb_mcnt += 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		sb->sb_ccnt += 1;
	}
}

void
sbfree_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;
	sb->sb_tlscc -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	sb->sb_mcnt -= 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		sb->sb_ccnt -= 1;
	}
}
#endif

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to the socket by the protocol code when the user
 * informs the system that no more data is to be sent (e.g., in the case of
 * PRU_SHUTDOWN).  Socantrcvmore indicates that no more data will be
 * received, and will normally be applied to the socket by a protocol when it
 * detects that the peer will send no more data.  Data queued for reading in
 * the socket may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_snd);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
#ifdef KERN_TLS
	if (so->so_rcv.sb_flags & SB_TLS_RX)
		ktls_check_rx(&so->so_rcv);
#endif
	sorwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	socantrcvmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}
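
/*
 * Illustrative sketch (not compiled): a stream protocol would
 * typically invoke these from its input and shutdown paths, e.g.
 *
 *	peer's FIN segment arrives  ->  socantrcvmore(so);
 *	local shutdown(2) request   ->  socantsendmore(so);
 *
 * after which readers may still drain previously queued data.
 */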
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}

int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		return (sx_xlock_sig(&sb->sb_sx));
	} else {
		if (sx_try_xlock(&sb->sb_sx) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}

void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}
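
/*
 * Illustrative sketch (hypothetical caller; not compiled): sblock()
 * serializes entire I/O operations on a buffer, while the sockbuf
 * mutex protects individual field accesses, so a typical consumer
 * nests the two:
 */
#if 0
static int
example_consume(struct socket *so, int flags)
{
	struct sockbuf *sb = &so->so_rcv;
	int error;

	error = sblock(sb, (flags & MSG_DONTWAIT) ? 0 : SBL_WAIT);
	if (error)
		return (error);
	SOCKBUF_LOCK(sb);
	/* ... examine sb_mb, dequeue mbufs, perhaps sbwait() ... */
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);
	return (0);
}
#endif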
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(sb->sb_sel, PSOCK);
	if (!SEL_WAITING(sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		sowakeup_aio(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
		goto bad;
	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
bad2:
	sbrelease_locked(&so->so_snd, so);
bad:
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (ENOBUFS);
}
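
/*
 * Illustrative lifecycle sketch (hypothetical; not compiled): a
 * protocol attach routine reserves space once, and the reservation is
 * returned via sbrelease() when the socket is torn down.
 * example_sendspace and example_recvspace stand in for the protocol's
 * default buffer sizes.
 */
#if 0
static int
example_attach(struct socket *so, int proto, struct thread *td)
{
	int error;

	error = soreserve(so, example_sendspace, example_recvspace);
	if (error)
		return (error);
	/* ... remaining protocol state setup ... */
	return (0);
}
#endif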
static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t sbsize_limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (0);
	if (td != NULL) {
		sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (0);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
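
/*
 * Worked example (illustrative; assumes the default sb_efficiency of 8
 * and sb_max of 2MB): reserving cc = 64KB sets sb_mbmax =
 * min(64KB * 8, 2MB) = 512KB of mbuf storage, leaving headroom for the
 * degenerate case where data arrives in many small, mostly empty
 * mbufs.
 */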
int
sbsetopt(struct socket *so, int cmd, u_long cc)
{
	struct sockbuf *sb;
	short *flags;
	u_int *hiwat, *lowat;
	int error;

	sb = NULL;
	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			lowat = &so->sol_sbsnd_lowat;
			hiwat = &so->sol_sbsnd_hiwat;
			flags = &so->sol_sbsnd_flags;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			lowat = &so->sol_sbrcv_lowat;
			hiwat = &so->sol_sbrcv_hiwat;
			flags = &so->sol_sbrcv_flags;
			break;
		}
	} else {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			sb = &so->so_snd;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			sb = &so->so_rcv;
			break;
		}
		flags = &sb->sb_flags;
		hiwat = &sb->sb_hiwat;
		lowat = &sb->sb_lowat;
		SOCKBUF_LOCK(sb);
	}

	error = 0;
	switch (cmd) {
	case SO_SNDBUF:
	case SO_RCVBUF:
		if (SOLISTENING(so)) {
			if (cc > sb_max_adj) {
				error = ENOBUFS;
				break;
			}
			*hiwat = cc;
			if (*lowat > *hiwat)
				*lowat = *hiwat;
		} else {
			if (!sbreserve_locked(sb, cc, so, curthread))
				error = ENOBUFS;
		}
		if (error == 0)
			*flags &= ~SB_AUTOSIZE;
		break;
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		/*
		 * Make sure the low-water is never greater than the
		 * high-water.
		 */
		*lowat = (cc > *hiwat) ? *hiwat : cc;
		break;
	}

	if (!SOLISTENING(so))
		SOCKBUF_UNLOCK(sb);
	SOCK_UNLOCK(so);
	return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK(sb);
	sbrelease_locked(sb, so);
	SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

	sbrelease_internal(sb, so);
#ifdef KERN_TLS
	if (sb->sb_tls_info != NULL)
		ktls_free(sb->sb_tls_info);
	sb->sb_tls_info = NULL;
#endif
}
/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and then removing the data from
 * the socket buffer with sbdrop() or sbdroprecord() when the data is
 * acknowledged by the peer.
 */
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}

#ifdef KERN_TLS
	m = sb->sb_mtls;
	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mtlstail) {
		printf("%s: sb_mtls %p sb_mtlstail %p last %p\n",
			__func__, sb->sb_mtls, sb->sb_mtlstail, m);
		printf("TLS packet tree:\n");
		printf("\t");
		for (m = sb->sb_mtls; m != NULL; m = m->m_next) {
			printf("%p ", m);
		}
		printf("\n");
		panic("%s from %s:%u", __func__, file, line);
	}
#endif
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == NULL)
		return;
	sbm_clrprotoflags(m, flags);
	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
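
/*
 * Informal picture of the record layout built by the append routines:
 * within a record, mbufs are linked through m_next; records are linked
 * through m_nextpkt of each record's first mbuf.  A datagram socket
 * buffer holding two records might look like:
 *
 *	sb_mb -> [MT_SONAME] -> [MT_CONTROL] -> [MT_DATA]
 *	             |
 *	         m_nextpkt
 *	             v
 *	         [MT_SONAME] -> [MT_DATA]	<- sb_lastrecord
 */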
#ifdef KERN_TLS
/*
 * Append an mbuf containing encrypted TLS data.  The data
 * is marked M_NOTREADY until it has been decrypted and
 * stored as a TLS record.
 */
static void
sbappend_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, 0);

	for (n = m; n != NULL; n = n->m_next)
		n->m_flags |= M_NOTREADY;
	sbcompress_ktls_rx(sb, m, sb->sb_mtlstail);
	ktls_check_rx(sb);
}
#endif

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));

#ifdef KERN_TLS
	/*
	 * Decrypted TLS records are appended as records via
	 * sbappendrecord().  TCP passes encrypted TLS records to this
	 * function which must be scheduled for decryption.
	 */
	if (sb->sb_flags & SB_TLS_RX) {
		sbappend_ktls_rx(sb, m);
		return;
	}
#endif

	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

#ifdef KERN_TLS
	if (sb->sb_tls_info != NULL)
		ktls_seq(sb, m);
#endif

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
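
/*
 * Illustrative sketch (hypothetical; not compiled): a stream
 * protocol's send path appends to the single record and then kicks its
 * output routine; example_output() stands in for e.g. tcp_output().
 */
#if 0
static int
example_send(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_snd);
	sbappendstream_locked(&so->so_snd, m, 0);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (example_output(so));
}
#endif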
#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;
#ifdef KERN_TLS
	u_long tlscc;
#endif

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		for (; m; m = m->m_next) {
			if (m->m_len == 0) {
				printf("sb %p empty mbuf %p\n", sb, m);
				goto fail;
			}
			if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
				if (m != sb->sb_fnrdy) {
					printf("sb %p: fnrdy %p != m %p\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
				fnrdy = m;
			}
			if (fnrdy) {
				if (!(m->m_flags & M_NOTAVAIL)) {
					printf("sb %p: fnrdy %p, m %p is avail\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
			} else
				acc += m->m_len;
			ccc += m->m_len;
			mbcnt += MSIZE;
			if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
				mbcnt += m->m_ext.ext_size;
		}
	}
#ifdef KERN_TLS
	/*
	 * Account for mbufs "detached" by ktls_detach_record() while
	 * they are decrypted by ktls_decrypt().  tlsdcc gives a count
	 * of the detached bytes that are included in ccc.  The mbufs
	 * and clusters are not included in the socket buffer
	 * accounting.
	 */
	ccc += sb->sb_tlsdcc;

	tlscc = 0;
	for (m = sb->sb_mtls; m; m = m->m_next) {
		if (m->m_nextpkt != NULL) {
			printf("sb %p TLS mbuf %p with nextpkt\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) == 0) {
			printf("sb %p TLS mbuf %p ready\n", sb, m);
			goto fail;
		}
		tlscc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	}

	if (sb->sb_tlscc != tlscc) {
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
		goto fail;
	}
#endif
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
#ifdef KERN_TLS
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
#endif
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == NULL)
		return;
	m_clrprotoflags(m0);
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}
/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0) {
		m_clrprotoflags(m0);
		m_tag_delete_chain(m0, NULL);
		/*
		 * Clear some persistent info from pkthdr.
		 * We don't use m_demote(), because some netgraph consumers
		 * expect M_PKTHDR presence.
		 */
		m0->m_pkthdr.rcvif = NULL;
		m0->m_pkthdr.flowid = 0;
		m0->m_pkthdr.csum_flags = 0;
		m0->m_pkthdr.fibnum = 0;
		m0->m_pkthdr.rsstype = 0;
	}
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate
 * space on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

void
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control, int flags)
{
	struct mbuf *m, *mlast;

	sbm_clrprotoflags(m0, flags);
	m_last(control)->m_next = m0;

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
}

void
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendcontrol_locked(sb, m0, control, flags);
	SOCKBUF_UNLOCK(sb);
}
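
/*
 * Illustrative sketch (hypothetical datagram input path; not
 * compiled): deliver a datagram together with its source address,
 * dropping it when the receive buffer lacks space.  On success the
 * socket buffer owns the chains; on failure the caller frees them.
 */
#if 0
static void
example_deliver(struct socket *so, struct sockaddr *src, struct mbuf *m,
    struct mbuf *control)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, src, m, control) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);
		if (control != NULL)
			m_freem(control);
	} else
		sorwakeup_locked(so);	/* releases the lock */
}
#endif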
/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, neither mbuf is marked
 *     not-ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & (M_NOTREADY | M_EXTPG)) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (m->m_len <= MLEN && (m->m_flags & M_EXTPG) &&
		    (m->m_flags & M_NOTREADY) == 0 &&
		    !mbuf_has_tls_session(m))
			(void)mb_unmapped_compress(m);
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}
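
/*
 * Example of case (2) above (illustrative): a stream socket written to
 * one byte at a time does not accumulate one mbuf per write; as long
 * as the tail mbuf is writable and has trailing space, each new byte
 * is copied into it and the fresh mbuf is freed, keeping sb_mbcnt
 * proportional to the data actually buffered.
 */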
#ifdef KERN_TLS
/*
 * A version of sbcompress() for encrypted TLS RX mbufs.  These mbufs
 * are appended to the 'sb_mtls' chain instead of 'sb_mb' and are also
 * a bit simpler (no EOR markers, always MT_DATA, etc.).
 */
static void
sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		KASSERT((m->m_flags & M_EOR) == 0,
		    ("TLS RX mbuf %p with EOR", m));
		KASSERT(m->m_type == MT_DATA,
		    ("TLS RX mbuf %p is not MT_DATA", m));
		KASSERT((m->m_flags & M_NOTREADY) != 0,
		    ("TLS RX mbuf %p ready", m));
		KASSERT((m->m_flags & M_EXTPG) == 0,
		    ("TLS RX mbuf %p unmapped", m));

		if (m->m_len == 0) {
			m = m_free(m);
			continue;
		}

		/*
		 * Even though both 'n' and 'm' are NOTREADY, it's ok
		 * to coalesce the data.
		 */
		if (n &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(n->m_flags & (M_EXTPG)) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n)) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			sb->sb_tlscc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mtls = m;
		sb->sb_mtlstail = m;
		sballoc_ktls_rx(sb, m);
		n = m;
		m = m->m_next;
		n->m_next = NULL;
	}
	SBLASTMBUFCHK(sb);
}
#endif

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt || sb->sb_tlsdcc) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever.  Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;
	bool is_tls;

	KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0",
	    __func__, len));
	KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u",
	    __func__, len, sb->sb_ccc));

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	is_tls = false;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
#ifdef KERN_TLS
			if (next == NULL && !is_tls) {
				if (sb->sb_tlsdcc != 0) {
					MPASS(len >= sb->sb_tlsdcc);
					len -= sb->sb_tlsdcc;
					sb->sb_ccc -= sb->sb_tlsdcc;
					sb->sb_tlsdcc = 0;
					if (len == 0)
						break;
				}
				next = sb->sb_mtls;
				is_tls = true;
			}
#endif
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
#ifdef KERN_TLS
		if (is_tls)
			sbfree_ktls_rx(sb, m);
		else
#endif
			sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers to the free list, they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY && !is_tls)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	/*
	 * Free any zero-length mbufs from the buffer.
	 * For SOCK_DGRAM sockets such mbufs represent empty records.
	 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
	 * when sosend_generic() needs to send only control data.
	 */
	while (m && m->m_len == 0) {
		struct mbuf *n;

		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
#ifdef KERN_TLS
	if (is_tls) {
		sb->sb_mb = NULL;
		sb->sb_mtls = m;
		if (m == NULL)
			sb->sb_mtlstail = NULL;
	} else
#endif
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}
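
/*
 * Illustrative sketch (hypothetical ACK processing; not compiled): a
 * reliable protocol trims acknowledged data from the front of the send
 * buffer as acknowledgements arrive.
 */
#if 0
	acked = th_ack - snd_una;	/* bytes newly acknowledged */
	SOCKBUF_LOCK(&so->so_snd);
	sbdrop_locked(&so->so_snd, acked);
	sowwakeup_locked(so);		/* wakes writers; releases the lock */
#endif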
struct mbuf *
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		*moff = off;
		if (sb->sb_sndptr == NULL) {
			sb->sb_sndptr = sb->sb_mb;
			sb->sb_sndptroff = 0;
		}
		return (sb->sb_mb);
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	*moff = off;
	return (m);
}

void
sbsndptr_adv(struct sockbuf *sb, struct mbuf *mb, uint32_t len)
{
	/*
	 * A small copy was done, advance forward the sb_sbsndptr to cover
	 * it.
	 */
	struct mbuf *m;

	if (mb != sb->sb_sndptr) {
		/* Did not copyout at the same mbuf */
		return;
	}
	m = mb;
	while (m && (len > 0)) {
		if (len >= m->m_len) {
			len -= m->m_len;
			if (m->m_next) {
				sb->sb_sndptroff += m->m_len;
				sb->sb_sndptr = m->m_next;
			}
			m = m->m_next;
		} else {
			len = 0;
		}
	}
}

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol_how(void *p, int size, int type, int level, int wait)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	MBUF_CHECKSLEEP(wait);
	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(wait, MT_CONTROL, 0);
	else
		m = m_get(wait, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data and the padding after the cmsg data un-initialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}

struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{

	return (sbcreatecontrol_how(p, size, type, level, M_NOWAIT));
}
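
/*
 * Illustrative sketch (hypothetical; not compiled): build a control
 * mbuf carrying a struct in_addr, in the style of IP_RECVDSTADDR
 * processing; the CMSG_* macros account for the alignment padding
 * around the cmsghdr and its data.
 */
#if 0
	struct mbuf *control;
	struct in_addr dst;

	control = sbcreatecontrol((caddr_t)&dst, sizeof(dst),
	    IP_RECVDSTADDR, IPPROTO_IP);
	if (control == NULL)
		return;		/* no mbufs; ancillary data is dropped */
#endif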
/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mcnt = sb->sb_mcnt;
	xsb->sb_ccnt = sb->sb_ccnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW | CTLFLAG_SKIP, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sb_max, 0,
    sysctl_handle_sb_max, "LU",
    "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");