1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 5 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Jeffrey M. Hsu. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 /* 37 * Copyright (c) 1982, 1986, 1988, 1991, 1993 38 * The Regents of the University of California. All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. Neither the name of the University nor the names of its contributors 49 * may be used to endorse or promote products derived from this software 50 * without specific prior written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * SUCH DAMAGE. 
63 * 64 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 65 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $ 66 */ 67 68 #include "opt_param.h" 69 #include "opt_mbuf_stress_test.h" 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/file.h> 73 #include <sys/malloc.h> 74 #include <sys/mbuf.h> 75 #include <sys/kernel.h> 76 #include <sys/sysctl.h> 77 #include <sys/domain.h> 78 #include <sys/objcache.h> 79 #include <sys/tree.h> 80 #include <sys/protosw.h> 81 #include <sys/uio.h> 82 #include <sys/thread.h> 83 #include <sys/proc.h> 84 #include <sys/globaldata.h> 85 86 #include <sys/spinlock2.h> 87 88 #include <machine/atomic.h> 89 #include <machine/limits.h> 90 91 #include <vm/vm.h> 92 #include <vm/vm_kern.h> 93 #include <vm/vm_extern.h> 94 95 #ifdef INVARIANTS 96 #include <machine/cpu.h> 97 #endif 98 99 /* 100 * mbuf cluster meta-data 101 */ 102 struct mbcluster { 103 int32_t mcl_refs; 104 void *mcl_data; 105 }; 106 107 /* 108 * mbuf tracking for debugging purposes 109 */ 110 #ifdef MBUF_DEBUG 111 112 static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack"); 113 114 struct mbtrack; 115 RB_HEAD(mbuf_rb_tree, mbtrack); 116 RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *); 117 118 struct mbtrack { 119 RB_ENTRY(mbtrack) rb_node; 120 int trackid; 121 struct mbuf *m; 122 }; 123 124 static int 125 mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2) 126 { 127 if (mb1->m < mb2->m) 128 return (-1); 129 if (mb1->m > mb2->m) 130 return (1); 131 return (0); 132 } 133 134 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m); 135 136 struct mbuf_rb_tree mbuf_track_root; 137 static struct spinlock mbuf_track_spin = 138 SPINLOCK_INITIALIZER(mbuf_track_spin, "mbuf_track_spin"); 139 140 static void 141 mbuftrack(struct mbuf *m) 142 { 143 struct mbtrack *mbt; 144 145 mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO); 146 spin_lock(&mbuf_track_spin); 147 mbt->m = m; 148 if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) { 149 spin_unlock(&mbuf_track_spin); 150 panic("%s: mbuf %p already being tracked", __func__, m); 151 } 152 spin_unlock(&mbuf_track_spin); 153 } 154 155 static void 156 mbufuntrack(struct mbuf *m) 157 { 158 struct mbtrack *mbt; 159 160 spin_lock(&mbuf_track_spin); 161 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m); 162 if (mbt == NULL) { 163 spin_unlock(&mbuf_track_spin); 164 panic("%s: mbuf %p was not tracked", __func__, m); 165 } else { 166 mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt); 167 spin_unlock(&mbuf_track_spin); 168 kfree(mbt, M_MTRACK); 169 } 170 } 171 172 void 173 mbuftrackid(struct mbuf *m, int trackid) 174 { 175 struct mbtrack *mbt; 176 struct mbuf *n; 177 178 spin_lock(&mbuf_track_spin); 179 while (m) { 180 n = m->m_nextpkt; 181 while (m) { 182 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m); 183 if (mbt == NULL) { 184 spin_unlock(&mbuf_track_spin); 185 panic("%s: mbuf %p not tracked", __func__, m); 186 } 187 mbt->trackid = trackid; 188 m = m->m_next; 189 } 190 m = n; 191 } 192 spin_unlock(&mbuf_track_spin); 193 } 194 195 static int 196 mbuftrack_callback(struct mbtrack *mbt, void *arg) 197 { 198 struct sysctl_req *req = arg; 199 char buf[64]; 200 int error; 201 202 ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid); 203 204 spin_unlock(&mbuf_track_spin); 205 error = SYSCTL_OUT(req, buf, strlen(buf)); 206 spin_lock(&mbuf_track_spin); 207 if (error) 208 return (-error); 209 return (0); 210 } 211 212 static int 213 mbuftrack_show(SYSCTL_HANDLER_ARGS) 214 { 215 int error;
216 217 spin_lock(&mbuf_track_spin); 218 error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL, 219 mbuftrack_callback, req); 220 spin_unlock(&mbuf_track_spin); 221 return (-error); 222 } 223 SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING, 224 0, 0, mbuftrack_show, "A", "Show all in-use mbufs"); 225 226 #else 227 228 #define mbuftrack(m) 229 #define mbufuntrack(m) 230 231 #endif 232 233 static void mbinit(void *); 234 SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL); 235 236 struct mbtypes_stat { 237 u_long stats[MT_NTYPES]; 238 } __cachealign; 239 240 static struct mbtypes_stat mbtypes[SMP_MAXCPU]; 241 242 static struct mbstat mbstat[SMP_MAXCPU] __cachealign; 243 int max_linkhdr; 244 int max_protohdr; 245 int max_hdr; 246 int max_datalen; 247 int m_defragpackets; 248 int m_defragbytes; 249 int m_defraguseless; 250 int m_defragfailure; 251 #ifdef MBUF_STRESS_TEST 252 int m_defragrandomfailures; 253 #endif 254 255 struct objcache *mbuf_cache, *mbufphdr_cache; 256 struct objcache *mclmeta_cache, *mjclmeta_cache; 257 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache; 258 struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache; 259 260 struct lock mbupdate_lk = LOCK_INITIALIZER("mbupdate", 0, LK_CANRECURSE); 261 262 int nmbclusters; 263 static int nmbjclusters; 264 int nmbufs; 265 266 static int mjclph_cachefrac; 267 static int mjcl_cachefrac; 268 static int mclph_cachefrac; 269 static int mcl_cachefrac; 270 271 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW, 272 &max_linkhdr, 0, "Max size of a link-level header"); 273 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW, 274 &max_protohdr, 0, "Max size of a protocol header"); 275 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, 276 "Max size of link+protocol headers"); 277 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW, 278 &max_datalen, 0, "Max data payload size without headers"); 279 280 static int do_mbstat(SYSCTL_HANDLER_ARGS); 281 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD, 282 0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics"); 283 284 static int do_mbtypes(SYSCTL_HANDLER_ARGS); 285 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD, 286 0, 0, do_mbtypes, "LU", ""); 287 288 static int 289 do_mbstat(SYSCTL_HANDLER_ARGS) 290 { 291 struct mbstat mbstat_total; 292 struct mbstat *mbstat_totalp; 293 int i; 294 295 bzero(&mbstat_total, sizeof(mbstat_total)); 296 mbstat_totalp = &mbstat_total; 297 298 for (i = 0; i < ncpus; i++) { 299 mbstat_total.m_mbufs += mbstat[i].m_mbufs; 300 mbstat_total.m_clusters += mbstat[i].m_clusters; 301 mbstat_total.m_jclusters += mbstat[i].m_jclusters; 302 mbstat_total.m_clfree += mbstat[i].m_clfree; 303 mbstat_total.m_drops += mbstat[i].m_drops; 304 mbstat_total.m_wait += mbstat[i].m_wait; 305 mbstat_total.m_drain += mbstat[i].m_drain; 306 mbstat_total.m_mcfail += mbstat[i].m_mcfail; 307 mbstat_total.m_mpfail += mbstat[i].m_mpfail; 308 } 309 310 /* 311 * The following fields are not cumulative fields so just 312 * get their values once. 
313 */ 314 mbstat_total.m_msize = mbstat[0].m_msize; 315 mbstat_total.m_mclbytes = mbstat[0].m_mclbytes; 316 mbstat_total.m_minclsize = mbstat[0].m_minclsize; 317 mbstat_total.m_mlen = mbstat[0].m_mlen; 318 mbstat_total.m_mhlen = mbstat[0].m_mhlen; 319 320 return sysctl_handle_opaque(oidp, mbstat_totalp, 321 sizeof(mbstat_total), req); 322 } 323 324 static int 325 do_mbtypes(SYSCTL_HANDLER_ARGS) 326 { 327 u_long totals[MT_NTYPES]; 328 int i, j; 329 330 for (i = 0; i < MT_NTYPES; i++) 331 totals[i] = 0; 332 333 for (i = 0; i < ncpus; i++) { 334 for (j = 0; j < MT_NTYPES; j++) 335 totals[j] += mbtypes[i].stats[j]; 336 } 337 338 return sysctl_handle_opaque(oidp, totals, sizeof(totals), req); 339 } 340 341 /* 342 * The variables may be set as boot-time tunables or live. Setting these 343 * values too low can deadlock your network. Network interfaces may also 344 * adjust nmbclusters and/or nmbjclusters to account for preloading the 345 * hardware rings. 346 */ 347 static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS); 348 static int sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS); 349 static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS); 350 SYSCTL_PROC(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLTYPE_INT | CTLFLAG_RW, 351 0, 0, sysctl_nmbclusters, "I", 352 "Maximum number of mbuf clusters available"); 353 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjclusters, CTLTYPE_INT | CTLFLAG_RW, 354 0, 0, sysctl_nmbjclusters, "I", 355 "Maximum number of mbuf jclusters available"); 356 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW, 357 0, 0, sysctl_nmbufs, "I", 358 "Maximum number of mbufs available"); 359 360 SYSCTL_INT(_kern_ipc, OID_AUTO, mjclph_cachefrac, CTLFLAG_RD, 361 &mjclph_cachefrac, 0, 362 "Fraction of cacheable mbuf jclusters w/ pkthdr"); 363 SYSCTL_INT(_kern_ipc, OID_AUTO, mjcl_cachefrac, CTLFLAG_RD, 364 &mjcl_cachefrac, 0, 365 "Fraction of cacheable mbuf jclusters"); 366 SYSCTL_INT(_kern_ipc, OID_AUTO, mclph_cachefrac, CTLFLAG_RD, 367 &mclph_cachefrac, 0, 368 "Fraction of cacheable mbuf clusters w/ pkthdr"); 369 SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_cachefrac, CTLFLAG_RD, 370 &mcl_cachefrac, 0, "Fraction of cacheable mbuf clusters"); 371 372 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD, 373 &m_defragpackets, 0, "Number of defragment packets"); 374 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD, 375 &m_defragbytes, 0, "Number of defragment bytes"); 376 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD, 377 &m_defraguseless, 0, 378 "Number of useless defragment mbuf chain operations"); 379 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD, 380 &m_defragfailure, 0, 381 "Number of failed defragment mbuf chain operations"); 382 #ifdef MBUF_STRESS_TEST 383 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW, 384 &m_defragrandomfailures, 0, ""); 385 #endif 386 387 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); 388 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl"); 389 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta"); 390 391 static void m_reclaim (void); 392 static void m_mclref(void *arg); 393 static void m_mclfree(void *arg); 394 static void m_mjclfree(void *arg); 395 396 static void mbupdatelimits(void); 397 398 /* 399 * Generally scale default mbufs to maxproc. 400 * 401 * NOTE: Default NMBUFS must take into account a possible DOS attack 402 * using fd passing on unix domain sockets. 
403 */ 404 #ifndef NMBCLUSTERS 405 #define NMBCLUSTERS (512 + maxproc * 4) 406 #endif 407 #ifndef BASE_CACHEFRAC 408 #define BASE_CACHEFRAC 16 409 #endif 410 #ifndef MJCLPH_CACHEFRAC 411 #define MJCLPH_CACHEFRAC (BASE_CACHEFRAC * 2) 412 #endif 413 #ifndef MJCL_CACHEFRAC 414 #define MJCL_CACHEFRAC (BASE_CACHEFRAC * 2) 415 #endif 416 #ifndef MCLPH_CACHEFRAC 417 #define MCLPH_CACHEFRAC (BASE_CACHEFRAC * 2) 418 #endif 419 #ifndef MCL_CACHEFRAC 420 #define MCL_CACHEFRAC (BASE_CACHEFRAC * 2) 421 #endif 422 #ifndef NMBJCLUSTERS 423 #define NMBJCLUSTERS (NMBCLUSTERS / 4) 424 #endif 425 #ifndef NMBUFS 426 #define NMBUFS (nmbclusters / 2 + maxfiles) 427 #endif 428 429 #define NMBCLUSTERS_MIN (NMBCLUSTERS / 2) 430 #define NMBJCLUSTERS_MIN (NMBJCLUSTERS / 2) 431 #define NMBUFS_MIN (NMBUFS / 2) 432 433 /* 434 * Perform sanity checks of tunables declared above. 435 */ 436 static void 437 tunable_mbinit(void *dummy __unused) 438 { 439 /* 440 * This has to be done before VM init. 441 */ 442 nmbclusters = NMBCLUSTERS; 443 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters); 444 mjclph_cachefrac = MJCLPH_CACHEFRAC; 445 TUNABLE_INT_FETCH("kern.ipc.mjclph_cachefrac", &mjclph_cachefrac); 446 mjcl_cachefrac = MJCL_CACHEFRAC; 447 TUNABLE_INT_FETCH("kern.ipc.mjcl_cachefrac", &mjcl_cachefrac); 448 mclph_cachefrac = MCLPH_CACHEFRAC; 449 TUNABLE_INT_FETCH("kern.ipc.mclph_cachefrac", &mclph_cachefrac); 450 mcl_cachefrac = MCL_CACHEFRAC; 451 TUNABLE_INT_FETCH("kern.ipc.mcl_cachefrac", &mcl_cachefrac); 452 453 /* 454 * WARNING! each mcl cache feeds two mbuf caches, so the minimum 455 * cachefrac is 2. For safety, use 3. 456 */ 457 if (mjclph_cachefrac < 3) 458 mjclph_cachefrac = 3; 459 if (mjcl_cachefrac < 3) 460 mjcl_cachefrac = 3; 461 if (mclph_cachefrac < 3) 462 mclph_cachefrac = 3; 463 if (mcl_cachefrac < 3) 464 mcl_cachefrac = 3; 465 466 nmbjclusters = NMBJCLUSTERS; 467 TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters); 468 469 nmbufs = NMBUFS; 470 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs); 471 472 /* Sanity checks */ 473 if (nmbufs < nmbclusters * 2) 474 nmbufs = nmbclusters * 2; 475 } 476 SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY, 477 tunable_mbinit, NULL); 478 479 static void 480 mbinclimit(int *limit, int inc, int minlim) 481 { 482 int new_limit; 483 484 lockmgr(&mbupdate_lk, LK_EXCLUSIVE); 485 486 new_limit = *limit + inc; 487 if (new_limit < minlim) 488 new_limit = minlim; 489 490 if (*limit != new_limit) { 491 *limit = new_limit; 492 mbupdatelimits(); 493 } 494 495 lockmgr(&mbupdate_lk, LK_RELEASE); 496 } 497 498 static int 499 mbsetlimit(int *limit, int new_limit, int minlim) 500 { 501 if (new_limit < minlim) 502 return EINVAL; 503 504 lockmgr(&mbupdate_lk, LK_EXCLUSIVE); 505 mbinclimit(limit, new_limit - *limit, minlim); 506 lockmgr(&mbupdate_lk, LK_RELEASE); 507 return 0; 508 } 509 510 static int 511 sysctl_mblimit(SYSCTL_HANDLER_ARGS, int *limit, int minlim) 512 { 513 int error, value; 514 515 value = *limit; 516 error = sysctl_handle_int(oidp, &value, 0, req); 517 if (error || req->newptr == NULL) 518 return error; 519 520 return mbsetlimit(limit, value, minlim); 521 } 522 523 /* 524 * Sysctl support to update nmbclusters, nmbjclusters, and nmbufs. 
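 *
 * Illustrative usage (not part of this file): the limits can be raised at
 * run time with e.g. "sysctl kern.ipc.nmbclusters=65536", or seeded at boot
 * through the matching kern.ipc.* tunables fetched in tunable_mbinit().
 * Requests below the *_MIN floors are rejected with EINVAL by mbsetlimit().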
525 */ 526 static int 527 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS) 528 { 529 return sysctl_mblimit(oidp, arg1, arg2, req, &nmbclusters, 530 NMBCLUSTERS_MIN); 531 } 532 533 static int 534 sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS) 535 { 536 return sysctl_mblimit(oidp, arg1, arg2, req, &nmbjclusters, 537 NMBJCLUSTERS_MIN); 538 } 539 540 static int 541 sysctl_nmbufs(SYSCTL_HANDLER_ARGS) 542 { 543 return sysctl_mblimit(oidp, arg1, arg2, req, &nmbufs, NMBUFS_MIN); 544 } 545 546 void 547 mcl_inclimit(int inc) 548 { 549 mbinclimit(&nmbclusters, inc, NMBCLUSTERS_MIN); 550 } 551 552 void 553 mjcl_inclimit(int inc) 554 { 555 mbinclimit(&nmbjclusters, inc, NMBJCLUSTERS_MIN); 556 } 557 558 void 559 mb_inclimit(int inc) 560 { 561 mbinclimit(&nmbufs, inc, NMBUFS_MIN); 562 } 563 564 /* 565 * The mbuf object cache only guarantees that m_next and m_nextpkt are 566 * NULL and that m_data points to the beginning of the data area. In 567 * particular, m_len and m_pkthdr.len are uninitialized. It is the 568 * responsibility of the caller to initialize those fields before use. 569 */ 570 static __inline boolean_t 571 mbuf_ctor(void *obj, void *private __unused, int ocflags __unused) 572 { 573 struct mbuf *m = obj; 574 575 m->m_next = NULL; 576 m->m_nextpkt = NULL; 577 m->m_data = m->m_dat; 578 m->m_flags = 0; 579 580 return (TRUE); 581 } 582 583 /* 584 * Initialize the mbuf and the packet header fields. 585 */ 586 static boolean_t 587 mbufphdr_ctor(void *obj, void *private __unused, int ocflags __unused) 588 { 589 struct mbuf *m = obj; 590 591 m->m_next = NULL; 592 m->m_nextpkt = NULL; 593 m->m_data = m->m_pktdat; 594 m->m_flags = M_PKTHDR | M_PHCACHE; 595 596 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */ 597 SLIST_INIT(&m->m_pkthdr.tags); 598 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */ 599 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */ 600 601 return (TRUE); 602 } 603 604 /* 605 * A mbcluster object consists of 2K (MCLBYTES) cluster and a refcount. 606 */ 607 static boolean_t 608 mclmeta_ctor(void *obj, void *private __unused, int ocflags) 609 { 610 struct mbcluster *cl = obj; 611 void *buf; 612 613 if (ocflags & M_NOWAIT) 614 buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO); 615 else 616 buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO); 617 if (buf == NULL) 618 return (FALSE); 619 cl->mcl_refs = 0; 620 cl->mcl_data = buf; 621 return (TRUE); 622 } 623 624 static boolean_t 625 mjclmeta_ctor(void *obj, void *private __unused, int ocflags) 626 { 627 struct mbcluster *cl = obj; 628 void *buf; 629 630 if (ocflags & M_NOWAIT) 631 buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO); 632 else 633 buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO); 634 if (buf == NULL) 635 return (FALSE); 636 cl->mcl_refs = 0; 637 cl->mcl_data = buf; 638 return (TRUE); 639 } 640 641 static void 642 mclmeta_dtor(void *obj, void *private __unused) 643 { 644 struct mbcluster *mcl = obj; 645 646 KKASSERT(mcl->mcl_refs == 0); 647 kfree(mcl->mcl_data, M_MBUFCL); 648 } 649 650 static void 651 linkjcluster(struct mbuf *m, struct mbcluster *cl, u_int size) 652 { 653 /* 654 * Add the cluster to the mbuf. The caller will detect that the 655 * mbuf now has an attached cluster. 
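 * The ext_free callback is chosen by cluster size, so that m_mclfree() or
 * m_mjclfree() later returns the cluster to the objcache it came from.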
656 */ 657 m->m_ext.ext_arg = cl; 658 m->m_ext.ext_buf = cl->mcl_data; 659 m->m_ext.ext_ref = m_mclref; 660 if (size != MCLBYTES) 661 m->m_ext.ext_free = m_mjclfree; 662 else 663 m->m_ext.ext_free = m_mclfree; 664 m->m_ext.ext_size = size; 665 atomic_add_int(&cl->mcl_refs, 1); 666 667 m->m_data = m->m_ext.ext_buf; 668 m->m_flags |= M_EXT | M_EXT_CLUSTER; 669 } 670 671 static void 672 linkcluster(struct mbuf *m, struct mbcluster *cl) 673 { 674 linkjcluster(m, cl, MCLBYTES); 675 } 676 677 static boolean_t 678 mbufphdrcluster_ctor(void *obj, void *private, int ocflags) 679 { 680 struct mbuf *m = obj; 681 struct mbcluster *cl; 682 683 mbufphdr_ctor(obj, private, ocflags); 684 cl = objcache_get(mclmeta_cache, ocflags); 685 if (cl == NULL) { 686 ++mbstat[mycpu->gd_cpuid].m_drops; 687 return (FALSE); 688 } 689 m->m_flags |= M_CLCACHE; 690 linkcluster(m, cl); 691 return (TRUE); 692 } 693 694 static boolean_t 695 mbufphdrjcluster_ctor(void *obj, void *private, int ocflags) 696 { 697 struct mbuf *m = obj; 698 struct mbcluster *cl; 699 700 mbufphdr_ctor(obj, private, ocflags); 701 cl = objcache_get(mjclmeta_cache, ocflags); 702 if (cl == NULL) { 703 ++mbstat[mycpu->gd_cpuid].m_drops; 704 return (FALSE); 705 } 706 m->m_flags |= M_CLCACHE; 707 linkjcluster(m, cl, MJUMPAGESIZE); 708 return (TRUE); 709 } 710 711 static boolean_t 712 mbufcluster_ctor(void *obj, void *private, int ocflags) 713 { 714 struct mbuf *m = obj; 715 struct mbcluster *cl; 716 717 mbuf_ctor(obj, private, ocflags); 718 cl = objcache_get(mclmeta_cache, ocflags); 719 if (cl == NULL) { 720 ++mbstat[mycpu->gd_cpuid].m_drops; 721 return (FALSE); 722 } 723 m->m_flags |= M_CLCACHE; 724 linkcluster(m, cl); 725 return (TRUE); 726 } 727 728 static boolean_t 729 mbufjcluster_ctor(void *obj, void *private, int ocflags) 730 { 731 struct mbuf *m = obj; 732 struct mbcluster *cl; 733 734 mbuf_ctor(obj, private, ocflags); 735 cl = objcache_get(mjclmeta_cache, ocflags); 736 if (cl == NULL) { 737 ++mbstat[mycpu->gd_cpuid].m_drops; 738 return (FALSE); 739 } 740 m->m_flags |= M_CLCACHE; 741 linkjcluster(m, cl, MJUMPAGESIZE); 742 return (TRUE); 743 } 744 745 /* 746 * Used for both the cluster and cluster PHDR caches. 747 * 748 * The mbuf may have lost its cluster due to sharing, deal 749 * with the situation by checking M_EXT. 750 */ 751 static void 752 mbufcluster_dtor(void *obj, void *private) 753 { 754 struct mbuf *m = obj; 755 struct mbcluster *mcl; 756 757 if (m->m_flags & M_EXT) { 758 KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0); 759 mcl = m->m_ext.ext_arg; 760 KKASSERT(mcl->mcl_refs == 1); 761 mcl->mcl_refs = 0; 762 if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES) 763 objcache_put(mjclmeta_cache, mcl); 764 else 765 objcache_put(mclmeta_cache, mcl); 766 } 767 } 768 769 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF }; 770 struct objcache_malloc_args mclmeta_malloc_args = 771 { sizeof(struct mbcluster), M_MCLMETA }; 772 773 static void 774 mbinit(void *dummy __unused) 775 { 776 int limit, mb_limit, cl_limit, ncl_limit, jcl_limit, i; 777 778 /* 779 * Initialize statistics 780 */ 781 for (i = 0; i < ncpus; i++) { 782 mbstat[i].m_msize = MSIZE; 783 mbstat[i].m_mclbytes = MCLBYTES; 784 mbstat[i].m_mjumpagesize = MJUMPAGESIZE; 785 mbstat[i].m_minclsize = MINCLSIZE; 786 mbstat[i].m_mlen = MLEN; 787 mbstat[i].m_mhlen = MHLEN; 788 } 789 790 /* 791 * Create object caches and save cluster limits, which will 792 * be used to adjust backing kmalloc pools' limit later. 
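 *
 * The mbuf-side limits are accumulated in mb_limit and the cluster meta
 * limits in cl_limit; the combined mbuf+cluster caches draw their clusters
 * from mclmeta_cache/mjclmeta_cache in their constructors.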
793 */ 794 795 mb_limit = cl_limit = 0; 796 797 limit = nmbufs; 798 mbuf_cache = objcache_create("mbuf", 799 limit, nmbufs / BASE_CACHEFRAC, 800 mbuf_ctor, NULL, NULL, 801 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 802 mb_limit += limit; 803 804 limit = nmbufs; 805 mbufphdr_cache = objcache_create("mbuf pkthdr", 806 limit, nmbufs / BASE_CACHEFRAC, 807 mbufphdr_ctor, NULL, NULL, 808 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 809 mb_limit += limit; 810 811 ncl_limit = nmbclusters; 812 mclmeta_cache = objcache_create("mbuf cluster", 813 ncl_limit, nmbclusters / BASE_CACHEFRAC, 814 mclmeta_ctor, mclmeta_dtor, NULL, 815 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args); 816 cl_limit += ncl_limit; 817 818 jcl_limit = nmbjclusters; 819 mjclmeta_cache = objcache_create("mbuf jcluster", 820 jcl_limit, nmbjclusters / BASE_CACHEFRAC, 821 mjclmeta_ctor, mclmeta_dtor, NULL, 822 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args); 823 cl_limit += jcl_limit; 824 825 limit = nmbclusters; 826 mbufcluster_cache = objcache_create("mbuf+cl", 827 limit, nmbclusters / mcl_cachefrac, 828 mbufcluster_ctor, mbufcluster_dtor, NULL, 829 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 830 mb_limit += limit; 831 832 limit = nmbclusters; 833 mbufphdrcluster_cache = objcache_create("mbuf pkthdr+cl", 834 limit, nmbclusters / mclph_cachefrac, 835 mbufphdrcluster_ctor, mbufcluster_dtor, NULL, 836 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 837 mb_limit += limit; 838 839 limit = nmbjclusters; 840 mbufjcluster_cache = objcache_create("mbuf+jcl", 841 limit, nmbjclusters / mjcl_cachefrac, 842 mbufjcluster_ctor, mbufcluster_dtor, NULL, 843 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 844 mb_limit += limit; 845 846 limit = nmbjclusters; 847 mbufphdrjcluster_cache = objcache_create("mbuf pkthdr+jcl", 848 limit, nmbjclusters / mjclph_cachefrac, 849 mbufphdrjcluster_ctor, mbufcluster_dtor, NULL, 850 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); 851 mb_limit += limit; 852 853 /* 854 * Adjust backing kmalloc pools' limit 855 * 856 * NOTE: We raise the limit by another 1/8 to take the effect 857 * of loosememuse into account. 858 */ 859 cl_limit += cl_limit / 8; 860 kmalloc_raise_limit(mclmeta_malloc_args.mtype, 861 mclmeta_malloc_args.objsize * (size_t)cl_limit); 862 kmalloc_raise_limit(M_MBUFCL, 863 (MCLBYTES * (size_t)ncl_limit) + 864 (MJUMPAGESIZE * (size_t)jcl_limit)); 865 866 mb_limit += mb_limit / 8; 867 kmalloc_raise_limit(mbuf_malloc_args.mtype, 868 mbuf_malloc_args.objsize * (size_t)mb_limit); 869 } 870 871 /* 872 * Adjust mbuf limits after changes have been made 873 * 874 * Caller must hold mbupdate_lk 875 */ 876 static void 877 mbupdatelimits(void) 878 { 879 int limit, mb_limit, cl_limit, ncl_limit, jcl_limit; 880 881 KASSERT(lockstatus(&mbupdate_lk, curthread) != 0, 882 ("mbupdate_lk is not held")); 883 884 /* 885 * Figure out adjustments to object caches after nmbufs, nmbclusters, 886 * or nmbjclusters has been modified. 
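 * The per-cache limits are simply re-derived from the new global values,
 * mirroring the calculation done in mbinit(), and the backing kmalloc
 * pools are raised to match.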
887 */ 888 mb_limit = cl_limit = 0; 889 890 limit = nmbufs; 891 objcache_set_cluster_limit(mbuf_cache, limit); 892 mb_limit += limit; 893 894 limit = nmbufs; 895 objcache_set_cluster_limit(mbufphdr_cache, limit); 896 mb_limit += limit; 897 898 ncl_limit = nmbclusters; 899 objcache_set_cluster_limit(mclmeta_cache, ncl_limit); 900 cl_limit += ncl_limit; 901 902 jcl_limit = nmbjclusters; 903 objcache_set_cluster_limit(mjclmeta_cache, jcl_limit); 904 cl_limit += jcl_limit; 905 906 limit = nmbclusters; 907 objcache_set_cluster_limit(mbufcluster_cache, limit); 908 mb_limit += limit; 909 910 limit = nmbclusters; 911 objcache_set_cluster_limit(mbufphdrcluster_cache, limit); 912 mb_limit += limit; 913 914 limit = nmbjclusters; 915 objcache_set_cluster_limit(mbufjcluster_cache, limit); 916 mb_limit += limit; 917 918 limit = nmbjclusters; 919 objcache_set_cluster_limit(mbufphdrjcluster_cache, limit); 920 mb_limit += limit; 921 922 /* 923 * Adjust backing kmalloc pools' limit 924 * 925 * NOTE: We raise the limit by another 1/8 to take the effect 926 * of loosememuse into account. 927 */ 928 cl_limit += cl_limit / 8; 929 kmalloc_raise_limit(mclmeta_malloc_args.mtype, 930 mclmeta_malloc_args.objsize * (size_t)cl_limit); 931 kmalloc_raise_limit(M_MBUFCL, 932 (MCLBYTES * (size_t)ncl_limit) + 933 (MJUMPAGESIZE * (size_t)jcl_limit)); 934 mb_limit += mb_limit / 8; 935 kmalloc_raise_limit(mbuf_malloc_args.mtype, 936 mbuf_malloc_args.objsize * (size_t)mb_limit); 937 } 938 939 /* 940 * Return the number of references to this mbuf's data. 0 is returned 941 * if the mbuf is not M_EXT, a reference count is returned if it is 942 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT. 943 */ 944 int 945 m_sharecount(struct mbuf *m) 946 { 947 switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) { 948 case 0: 949 return (0); 950 case M_EXT: 951 return (99); 952 case M_EXT | M_EXT_CLUSTER: 953 return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs); 954 } 955 /* NOTREACHED */ 956 return (0); /* to shut up compiler */ 957 } 958 959 /* 960 * change mbuf to new type 961 */ 962 void 963 m_chtype(struct mbuf *m, int type) 964 { 965 struct globaldata *gd = mycpu; 966 967 ++mbtypes[gd->gd_cpuid].stats[type]; 968 --mbtypes[gd->gd_cpuid].stats[m->m_type]; 969 m->m_type = type; 970 } 971 972 static void 973 m_reclaim(void) 974 { 975 struct domain *dp; 976 struct protosw *pr; 977 978 kprintf("Debug: m_reclaim() called\n"); 979 980 SLIST_FOREACH(dp, &domains, dom_next) { 981 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) { 982 if (pr->pr_drain) 983 (*pr->pr_drain)(); 984 } 985 } 986 ++mbstat[mycpu->gd_cpuid].m_drain; 987 } 988 989 static __inline void 990 updatestats(struct mbuf *m, int type) 991 { 992 struct globaldata *gd = mycpu; 993 994 m->m_type = type; 995 mbuftrack(m); 996 #ifdef MBUF_DEBUG 997 KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m)); 998 KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m)); 999 #endif 1000 1001 ++mbtypes[gd->gd_cpuid].stats[type]; 1002 ++mbstat[gd->gd_cpuid].m_mbufs; 1003 1004 } 1005 1006 /* 1007 * Allocate an mbuf. 
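 *
 * Minimal usage sketch (illustrative only; "payload" and "paylen" are
 * hypothetical, and paylen is assumed to fit in MLEN):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = paylen;
 *	bcopy(payload, mtod(m, void *), paylen);
 *
 * M_WAITOK callers may block and will run the reclaim path below once
 * before giving up.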
1008 */ 1009 struct mbuf * 1010 m_get(int how, int type) 1011 { 1012 struct mbuf *m; 1013 int ntries = 0; 1014 int ocf = MB_OCFLAG(how); 1015 1016 retryonce: 1017 1018 m = objcache_get(mbuf_cache, ocf); 1019 1020 if (m == NULL) { 1021 if ((ocf & M_WAITOK) && ntries++ == 0) { 1022 struct objcache *reclaimlist[] = { 1023 mbufphdr_cache, 1024 mbufcluster_cache, 1025 mbufphdrcluster_cache, 1026 mbufjcluster_cache, 1027 mbufphdrjcluster_cache 1028 }; 1029 const int nreclaims = NELEM(reclaimlist); 1030 1031 if (!objcache_reclaimlist(reclaimlist, nreclaims)) 1032 m_reclaim(); 1033 goto retryonce; 1034 } 1035 ++mbstat[mycpu->gd_cpuid].m_drops; 1036 return (NULL); 1037 } 1038 #ifdef MBUF_DEBUG 1039 KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m)); 1040 #endif 1041 m->m_len = 0; 1042 1043 updatestats(m, type); 1044 return (m); 1045 } 1046 1047 struct mbuf * 1048 m_gethdr(int how, int type) 1049 { 1050 struct mbuf *m; 1051 int ocf = MB_OCFLAG(how); 1052 int ntries = 0; 1053 1054 retryonce: 1055 1056 m = objcache_get(mbufphdr_cache, ocf); 1057 1058 if (m == NULL) { 1059 if ((ocf & M_WAITOK) && ntries++ == 0) { 1060 struct objcache *reclaimlist[] = { 1061 mbuf_cache, 1062 mbufcluster_cache, mbufphdrcluster_cache, 1063 mbufjcluster_cache, mbufphdrjcluster_cache 1064 }; 1065 const int nreclaims = NELEM(reclaimlist); 1066 1067 if (!objcache_reclaimlist(reclaimlist, nreclaims)) 1068 m_reclaim(); 1069 goto retryonce; 1070 } 1071 ++mbstat[mycpu->gd_cpuid].m_drops; 1072 return (NULL); 1073 } 1074 #ifdef MBUF_DEBUG 1075 KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m)); 1076 #endif 1077 m->m_len = 0; 1078 m->m_pkthdr.len = 0; 1079 m->m_pkthdr.loop_cnt = 0; 1080 1081 updatestats(m, type); 1082 return (m); 1083 } 1084 1085 /* 1086 * Get a mbuf (not a mbuf cluster!) and zero it. 1087 * 1088 * Deprecated. 
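 * (Equivalent sketch with the non-deprecated interface: m = m_get(how, type);
 * if (m != NULL) bzero(mtod(m, void *), MLEN); which is exactly what the
 * wrapper below does.)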
1089 */ 1090 struct mbuf * 1091 m_getclr(int how, int type) 1092 { 1093 struct mbuf *m; 1094 1095 m = m_get(how, type); 1096 if (m != NULL) 1097 bzero(m->m_data, MLEN); 1098 return (m); 1099 } 1100 1101 static struct mbuf * 1102 m_getcl_cache(int how, short type, int flags, struct objcache *mbclc, 1103 struct objcache *mbphclc, u_long *cl_stats) 1104 { 1105 struct mbuf *m = NULL; 1106 int ocflags = MB_OCFLAG(how); 1107 int ntries = 0; 1108 1109 retryonce: 1110 1111 if (flags & M_PKTHDR) 1112 m = objcache_get(mbphclc, ocflags); 1113 else 1114 m = objcache_get(mbclc, ocflags); 1115 1116 if (m == NULL) { 1117 if ((ocflags & M_WAITOK) && ntries++ == 0) { 1118 struct objcache *reclaimlist[1]; 1119 1120 if (flags & M_PKTHDR) 1121 reclaimlist[0] = mbclc; 1122 else 1123 reclaimlist[0] = mbphclc; 1124 if (!objcache_reclaimlist(reclaimlist, 1)) 1125 m_reclaim(); 1126 goto retryonce; 1127 } 1128 ++mbstat[mycpu->gd_cpuid].m_drops; 1129 return (NULL); 1130 } 1131 1132 #ifdef MBUF_DEBUG 1133 KASSERT(m->m_data == m->m_ext.ext_buf, 1134 ("mbuf %p: bad m_data in get", m)); 1135 #endif 1136 m->m_type = type; 1137 m->m_len = 0; 1138 m->m_pkthdr.len = 0; /* just do it unconditionally */ 1139 1140 mbuftrack(m); 1141 1142 ++mbtypes[mycpu->gd_cpuid].stats[type]; 1143 ++(*cl_stats); 1144 return (m); 1145 } 1146 1147 struct mbuf * 1148 m_getjcl(int how, short type, int flags, size_t size) 1149 { 1150 struct objcache *mbclc, *mbphclc; 1151 u_long *cl_stats; 1152 1153 switch (size) { 1154 case MCLBYTES: 1155 mbclc = mbufcluster_cache; 1156 mbphclc = mbufphdrcluster_cache; 1157 cl_stats = &mbstat[mycpu->gd_cpuid].m_clusters; 1158 break; 1159 1160 default: 1161 mbclc = mbufjcluster_cache; 1162 mbphclc = mbufphdrjcluster_cache; 1163 cl_stats = &mbstat[mycpu->gd_cpuid].m_jclusters; 1164 break; 1165 } 1166 return m_getcl_cache(how, type, flags, mbclc, mbphclc, cl_stats); 1167 } 1168 1169 /* 1170 * Returns an mbuf with an attached cluster. 1171 * Because many network drivers use this kind of buffers a lot, it is 1172 * convenient to keep a small pool of free buffers of this kind. 1173 * Even a small size such as 10 gives about 10% improvement in the 1174 * forwarding rate in a bridge or router. 1175 */ 1176 struct mbuf * 1177 m_getcl(int how, short type, int flags) 1178 { 1179 return m_getcl_cache(how, type, flags, 1180 mbufcluster_cache, mbufphdrcluster_cache, 1181 &mbstat[mycpu->gd_cpuid].m_clusters); 1182 } 1183 1184 /* 1185 * Allocate chain of requested length. 1186 */ 1187 struct mbuf * 1188 m_getc(int len, int how, int type) 1189 { 1190 struct mbuf *n, *nfirst = NULL, **ntail = &nfirst; 1191 int nsize; 1192 1193 while (len > 0) { 1194 n = m_getl(len, how, type, 0, &nsize); 1195 if (n == NULL) 1196 goto failed; 1197 n->m_len = 0; 1198 *ntail = n; 1199 ntail = &n->m_next; 1200 len -= nsize; 1201 } 1202 return (nfirst); 1203 1204 failed: 1205 m_freem(nfirst); 1206 return (NULL); 1207 } 1208 1209 /* 1210 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best) 1211 * and return a pointer to the head of the allocated chain. If m0 is 1212 * non-null, then we assume that it is a single mbuf or an mbuf chain to 1213 * which we want len bytes worth of mbufs and/or clusters attached, and so 1214 * if we succeed in allocating it, we will just return a pointer to m0. 1215 * 1216 * If we happen to fail at any point during the allocation, we will free 1217 * up everything we have already allocated and return NULL. 1218 * 1219 * Deprecated. Use m_getc() and m_cat() instead. 
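 *
 * Replacement sketch using m_getc() (illustrative; "buf" and "len" are
 * hypothetical). The chain comes back with m_len = 0 in each mbuf, so the
 * caller fills it explicitly:
 *
 *	struct mbuf *top, *n;
 *	int off = 0;
 *
 *	top = m_getc(len, M_WAITOK, MT_DATA);
 *	if (top == NULL)
 *		return (NULL);
 *	for (n = top; n != NULL && off < len; n = n->m_next) {
 *		n->m_len = min(len - off, M_TRAILINGSPACE(n));
 *		bcopy(buf + off, mtod(n, caddr_t), n->m_len);
 *		off += n->m_len;
 *	}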
1220 */ 1221 struct mbuf * 1222 m_getm(struct mbuf *m0, int len, int type, int how) 1223 { 1224 struct mbuf *nfirst; 1225 1226 nfirst = m_getc(len, how, type); 1227 1228 if (m0 != NULL) { 1229 m_last(m0)->m_next = nfirst; 1230 return (m0); 1231 } 1232 1233 return (nfirst); 1234 } 1235 1236 /* 1237 * Adds a cluster to a normal mbuf, M_EXT is set on success. 1238 * Deprecated. Use m_getcl() instead. 1239 */ 1240 void 1241 m_mclget(struct mbuf *m, int how) 1242 { 1243 struct mbcluster *mcl; 1244 1245 KKASSERT((m->m_flags & M_EXT) == 0); 1246 mcl = objcache_get(mclmeta_cache, MB_OCFLAG(how)); 1247 if (mcl != NULL) { 1248 linkcluster(m, mcl); 1249 ++mbstat[mycpu->gd_cpuid].m_clusters; 1250 } else { 1251 ++mbstat[mycpu->gd_cpuid].m_drops; 1252 } 1253 } 1254 1255 /* 1256 * Updates to mbcluster must be MPSAFE. Only an entity which already has 1257 * a reference to the cluster can ref it, so we are in no danger of 1258 * racing an add with a subtract. But the operation must still be atomic 1259 * since multiple entities may have a reference on the cluster. 1260 * 1261 * m_mclfree() is almost the same but it must contend with two entities 1262 * freeing the cluster at the same time. 1263 */ 1264 static void 1265 m_mclref(void *arg) 1266 { 1267 struct mbcluster *mcl = arg; 1268 1269 atomic_add_int(&mcl->mcl_refs, 1); 1270 } 1271 1272 /* 1273 * When dereferencing a cluster we have to deal with a N->0 race, where 1274 * N entities free their references simultaneously. To do this we use 1275 * atomic_fetchadd_int(). 1276 */ 1277 static void 1278 m_mclfree(void *arg) 1279 { 1280 struct mbcluster *mcl = arg; 1281 1282 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) { 1283 --mbstat[mycpu->gd_cpuid].m_clusters; 1284 objcache_put(mclmeta_cache, mcl); 1285 } 1286 } 1287 1288 static void 1289 m_mjclfree(void *arg) 1290 { 1291 struct mbcluster *mcl = arg; 1292 1293 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) { 1294 --mbstat[mycpu->gd_cpuid].m_jclusters; 1295 objcache_put(mjclmeta_cache, mcl); 1296 } 1297 } 1298 1299 /* 1300 * Free a single mbuf and any associated external storage. The successor, 1301 * if any, is returned. 1302 * 1303 * We do need to check non-first mbuf for m_aux, since some of existing 1304 * code does not call M_PREPEND properly. 1305 * (example: call to bpf_mtap from drivers) 1306 */ 1307 1308 struct mbuf * 1309 #ifdef MBUF_DEBUG 1310 _m_free(struct mbuf *m, const char *func) 1311 #else 1312 m_free(struct mbuf *m) 1313 #endif 1314 { 1315 struct mbuf *n; 1316 struct globaldata *gd = mycpu; 1317 1318 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m)); 1319 KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m)); 1320 --mbtypes[gd->gd_cpuid].stats[m->m_type]; 1321 1322 n = m->m_next; 1323 1324 /* 1325 * Make sure the mbuf is in constructed state before returning it 1326 * to the objcache. 1327 */ 1328 m->m_next = NULL; 1329 mbufuntrack(m); 1330 #ifdef MBUF_DEBUG 1331 m->m_hdr.mh_lastfunc = func; 1332 #endif 1333 #ifdef notyet 1334 KKASSERT(m->m_nextpkt == NULL); 1335 #else 1336 if (m->m_nextpkt != NULL) { 1337 static int afewtimes = 10; 1338 1339 if (afewtimes-- > 0) { 1340 kprintf("mfree: m->m_nextpkt != NULL\n"); 1341 print_backtrace(-1); 1342 } 1343 m->m_nextpkt = NULL; 1344 } 1345 #endif 1346 if (m->m_flags & M_PKTHDR) { 1347 m_tag_delete_chain(m); /* eliminate XXX JH */ 1348 } 1349 1350 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE); 1351 1352 /* 1353 * Clean the M_PKTHDR state so we can return the mbuf to its original 1354 * cache. 
This is based on the PHCACHE flag which tells us whether 1355 * the mbuf was originally allocated out of a packet-header cache 1356 * or a non-packet-header cache. 1357 */ 1358 if (m->m_flags & M_PHCACHE) { 1359 m->m_flags |= M_PKTHDR; 1360 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */ 1361 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */ 1362 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */ 1363 SLIST_INIT(&m->m_pkthdr.tags); 1364 } 1365 1366 /* 1367 * Handle remaining flags combinations. M_CLCACHE tells us whether 1368 * the mbuf was originally allocated from a cluster cache or not, 1369 * and is totally separate from whether the mbuf is currently 1370 * associated with a cluster. 1371 */ 1372 switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) { 1373 case M_CLCACHE | M_EXT | M_EXT_CLUSTER: 1374 /* 1375 * mbuf+cluster cache case. The mbuf was allocated from the 1376 * combined mbuf_cluster cache and can be returned to the 1377 * cache if the cluster hasn't been shared. 1378 */ 1379 if (m_sharecount(m) == 1) { 1380 /* 1381 * The cluster has not been shared, we can just 1382 * reset the data pointer and return the mbuf 1383 * to the cluster cache. Note that the reference 1384 * count is left intact (it is still associated with 1385 * an mbuf). 1386 */ 1387 m->m_data = m->m_ext.ext_buf; 1388 if ((m->m_flags & M_EXT) && 1389 m->m_ext.ext_size != MCLBYTES) { 1390 if (m->m_flags & M_PHCACHE) 1391 objcache_put(mbufphdrjcluster_cache, m); 1392 else 1393 objcache_put(mbufjcluster_cache, m); 1394 --mbstat[mycpu->gd_cpuid].m_jclusters; 1395 } else { 1396 if (m->m_flags & M_PHCACHE) 1397 objcache_put(mbufphdrcluster_cache, m); 1398 else 1399 objcache_put(mbufcluster_cache, m); 1400 --mbstat[mycpu->gd_cpuid].m_clusters; 1401 } 1402 } else { 1403 /* 1404 * Hell. Someone else has a ref on this cluster, 1405 * we have to disconnect it which means we can't 1406 * put it back into the mbufcluster_cache, we 1407 * have to destroy the mbuf. 1408 * 1409 * Other mbuf references to the cluster will typically 1410 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE. 1411 * 1412 * XXX we could try to connect another cluster to it. 1413 */ 1414 m->m_ext.ext_free(m->m_ext.ext_arg); 1415 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER); 1416 if (m->m_ext.ext_size == MCLBYTES) { 1417 if (m->m_flags & M_PHCACHE) 1418 objcache_dtor(mbufphdrcluster_cache, m); 1419 else 1420 objcache_dtor(mbufcluster_cache, m); 1421 } else { 1422 if (m->m_flags & M_PHCACHE) 1423 objcache_dtor(mbufphdrjcluster_cache, m); 1424 else 1425 objcache_dtor(mbufjcluster_cache, m); 1426 } 1427 } 1428 break; 1429 case M_EXT | M_EXT_CLUSTER: 1430 case M_EXT: 1431 /* 1432 * Normal cluster association case, disconnect the cluster from 1433 * the mbuf. The cluster may or may not be custom. 1434 */ 1435 m->m_ext.ext_free(m->m_ext.ext_arg); 1436 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER); 1437 /* FALLTHROUGH */ 1438 case 0: 1439 /* 1440 * return the mbuf to the mbuf cache. 
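 * (to mbufphdr_cache if M_PHCACHE is set, to mbuf_cache otherwise).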
1441 */ 1442 if (m->m_flags & M_PHCACHE) { 1443 m->m_data = m->m_pktdat; 1444 objcache_put(mbufphdr_cache, m); 1445 } else { 1446 m->m_data = m->m_dat; 1447 objcache_put(mbuf_cache, m); 1448 } 1449 --mbstat[mycpu->gd_cpuid].m_mbufs; 1450 break; 1451 default: 1452 if (!panicstr) 1453 panic("bad mbuf flags %p %08x", m, m->m_flags); 1454 break; 1455 } 1456 return (n); 1457 } 1458 1459 #ifdef MBUF_DEBUG 1460 1461 void 1462 _m_freem(struct mbuf *m, const char *func) 1463 { 1464 while (m) 1465 m = _m_free(m, func); 1466 } 1467 1468 #else 1469 1470 void 1471 m_freem(struct mbuf *m) 1472 { 1473 while (m) 1474 m = m_free(m); 1475 } 1476 1477 #endif /* MBUF_DEBUG */ 1478 1479 void 1480 m_extadd(struct mbuf *m, void *buf, u_int size, void (*reff)(void *), 1481 void (*freef)(void *), void *arg) 1482 { 1483 m->m_ext.ext_arg = arg; 1484 m->m_ext.ext_buf = buf; 1485 m->m_ext.ext_ref = reff; 1486 m->m_ext.ext_free = freef; 1487 m->m_ext.ext_size = size; 1488 reff(arg); 1489 m->m_data = buf; 1490 m->m_flags |= M_EXT; 1491 } 1492 1493 /* 1494 * mbuf utility routines 1495 */ 1496 1497 /* 1498 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and 1499 * copy junk along. 1500 */ 1501 struct mbuf * 1502 m_prepend(struct mbuf *m, int len, int how) 1503 { 1504 struct mbuf *mn; 1505 1506 if (m->m_flags & M_PKTHDR) 1507 mn = m_gethdr(how, m->m_type); 1508 else 1509 mn = m_get(how, m->m_type); 1510 if (mn == NULL) { 1511 m_freem(m); 1512 return (NULL); 1513 } 1514 if (m->m_flags & M_PKTHDR) 1515 M_MOVE_PKTHDR(mn, m); 1516 mn->m_next = m; 1517 m = mn; 1518 if (len < MHLEN) 1519 MH_ALIGN(m, len); 1520 m->m_len = len; 1521 return (m); 1522 } 1523 1524 /* 1525 * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 1526 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 1527 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller. 1528 * Note that the copy is read-only, because clusters are not copied, 1529 * only their reference counts are incremented. 1530 */ 1531 struct mbuf * 1532 m_copym(const struct mbuf *m, int off0, int len, int wait) 1533 { 1534 struct mbuf *n, **np; 1535 int off = off0; 1536 struct mbuf *top; 1537 int copyhdr = 0; 1538 1539 KASSERT(off >= 0, ("%s: negative off %d", __func__, off)); 1540 KASSERT(len >= 0, ("%s: negative len %d", __func__, len)); 1541 if (off == 0 && (m->m_flags & M_PKTHDR)) 1542 copyhdr = 1; 1543 while (off > 0) { 1544 KASSERT(m != NULL, 1545 ("%s: offset > size of mbuf chain", __func__)); 1546 if (off < m->m_len) 1547 break; 1548 off -= m->m_len; 1549 m = m->m_next; 1550 } 1551 np = ⊤ 1552 top = NULL; 1553 while (len > 0) { 1554 if (m == NULL) { 1555 KASSERT(len == M_COPYALL, 1556 ("%s: length > size of mbuf chain", __func__)); 1557 break; 1558 } 1559 /* 1560 * Because we are sharing any cluster attachment below, 1561 * be sure to get an mbuf that does not have a cluster 1562 * associated with it. 
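 * (hence plain m_gethdr()/m_get() below rather than m_getcl()).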
1563 */ 1564 if (copyhdr) 1565 n = m_gethdr(wait, m->m_type); 1566 else 1567 n = m_get(wait, m->m_type); 1568 *np = n; 1569 if (n == NULL) 1570 goto nospace; 1571 if (copyhdr) { 1572 if (!m_dup_pkthdr(n, m, wait)) 1573 goto nospace; 1574 if (len == M_COPYALL) 1575 n->m_pkthdr.len -= off0; 1576 else 1577 n->m_pkthdr.len = len; 1578 copyhdr = 0; 1579 } 1580 n->m_len = min(len, m->m_len - off); 1581 if (m->m_flags & M_EXT) { 1582 KKASSERT((n->m_flags & M_EXT) == 0); 1583 n->m_data = m->m_data + off; 1584 m->m_ext.ext_ref(m->m_ext.ext_arg); 1585 n->m_ext = m->m_ext; 1586 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER); 1587 } else { 1588 bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t), 1589 n->m_len); 1590 } 1591 if (len != M_COPYALL) 1592 len -= n->m_len; 1593 off = 0; 1594 m = m->m_next; 1595 np = &n->m_next; 1596 } 1597 if (top == NULL) 1598 ++mbstat[mycpu->gd_cpuid].m_mcfail; 1599 return (top); 1600 nospace: 1601 m_freem(top); 1602 ++mbstat[mycpu->gd_cpuid].m_mcfail; 1603 return (NULL); 1604 } 1605 1606 /* 1607 * Copy an entire packet, including header (which must be present). 1608 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'. 1609 * Note that the copy is read-only, because clusters are not copied, 1610 * only their reference counts are incremented. 1611 * Preserve alignment of the first mbuf so if the creator has left 1612 * some room at the beginning (e.g. for inserting protocol headers) 1613 * the copies also have the room available. 1614 */ 1615 struct mbuf * 1616 m_copypacket(struct mbuf *m, int how) 1617 { 1618 struct mbuf *top, *n, *o; 1619 1620 n = m_gethdr(how, m->m_type); 1621 top = n; 1622 if (!n) 1623 goto nospace; 1624 1625 if (!m_dup_pkthdr(n, m, how)) 1626 goto nospace; 1627 n->m_len = m->m_len; 1628 if (m->m_flags & M_EXT) { 1629 KKASSERT((n->m_flags & M_EXT) == 0); 1630 n->m_data = m->m_data; 1631 m->m_ext.ext_ref(m->m_ext.ext_arg); 1632 n->m_ext = m->m_ext; 1633 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER); 1634 } else { 1635 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat); 1636 bcopy(mtod(m, void *), mtod(n, void *), n->m_len); 1637 } 1638 1639 m = m->m_next; 1640 while (m) { 1641 o = m_get(how, m->m_type); 1642 if (!o) 1643 goto nospace; 1644 1645 n->m_next = o; 1646 n = n->m_next; 1647 1648 n->m_len = m->m_len; 1649 if (m->m_flags & M_EXT) { 1650 KKASSERT((n->m_flags & M_EXT) == 0); 1651 n->m_data = m->m_data; 1652 m->m_ext.ext_ref(m->m_ext.ext_arg); 1653 n->m_ext = m->m_ext; 1654 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER); 1655 } else { 1656 bcopy(mtod(m, void *), mtod(n, void *), n->m_len); 1657 } 1658 1659 m = m->m_next; 1660 } 1661 return top; 1662 nospace: 1663 m_freem(top); 1664 ++mbstat[mycpu->gd_cpuid].m_mcfail; 1665 return (NULL); 1666 } 1667 1668 /* 1669 * Copy data from an mbuf chain starting "off" bytes from the beginning, 1670 * continuing for "len" bytes, into the indicated buffer. 
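 *
 * Typical usage sketch (illustrative): pull a header out of a possibly
 * fragmented chain into a local structure:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), &iphdr);
 *
 * The chain must contain at least off + len bytes; this is asserted below.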
1671 */ 1672 void 1673 m_copydata(const struct mbuf *m, int off, int len, void *_cp) 1674 { 1675 caddr_t cp = _cp; 1676 unsigned count; 1677 1678 KASSERT(off >= 0, ("%s: negative off %d", __func__, off)); 1679 KASSERT(len >= 0, ("%s: negative len %d", __func__, len)); 1680 while (off > 0) { 1681 KASSERT(m != NULL, 1682 ("%s: offset > size of mbuf chain", __func__)); 1683 if (off < m->m_len) 1684 break; 1685 off -= m->m_len; 1686 m = m->m_next; 1687 } 1688 while (len > 0) { 1689 KASSERT(m != NULL, 1690 ("%s: length > size of mbuf chain", __func__)); 1691 count = min(m->m_len - off, len); 1692 bcopy(mtod(m, caddr_t) + off, cp, count); 1693 len -= count; 1694 cp += count; 1695 off = 0; 1696 m = m->m_next; 1697 } 1698 } 1699 1700 /* 1701 * Copy a packet header mbuf chain into a completely new chain, including 1702 * copying any mbuf clusters. Use this instead of m_copypacket() when 1703 * you need a writable copy of an mbuf chain. 1704 */ 1705 struct mbuf * 1706 m_dup(struct mbuf *m, int how) 1707 { 1708 struct mbuf **p, *n, *top = NULL; 1709 int remain, moff, nsize, chunk; 1710 1711 /* Sanity check */ 1712 if (m == NULL) 1713 return (NULL); 1714 1715 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__)); 1716 1717 /* While there's more data, get a new mbuf, tack it on, and fill it */ 1718 remain = m->m_pkthdr.len; 1719 moff = 0; 1720 p = ⊤ 1721 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */ 1722 /* Get the next new mbuf */ 1723 n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0, 1724 &nsize); 1725 if (n == NULL) 1726 goto nospace; 1727 if (top == NULL) 1728 if (!m_dup_pkthdr(n, m, how)) 1729 goto nospace0; 1730 1731 /* Link it into the new chain */ 1732 *p = n; 1733 p = &n->m_next; 1734 1735 /* Copy data from original mbuf(s) into new mbuf */ 1736 n->m_len = 0; 1737 while (n->m_len < nsize && m != NULL) { 1738 chunk = min(nsize - n->m_len, m->m_len - moff); 1739 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk); 1740 moff += chunk; 1741 n->m_len += chunk; 1742 remain -= chunk; 1743 if (moff == m->m_len) { 1744 m = m->m_next; 1745 moff = 0; 1746 } 1747 } 1748 1749 /* Check correct total mbuf length */ 1750 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL), 1751 ("%s: bogus m_pkthdr.len", __func__)); 1752 } 1753 return (top); 1754 1755 nospace: 1756 m_freem(top); 1757 nospace0: 1758 ++mbstat[mycpu->gd_cpuid].m_mcfail; 1759 return (NULL); 1760 } 1761 1762 /* 1763 * Copy the non-packet mbuf data chain into a new set of mbufs, including 1764 * copying any mbuf clusters. This is typically used to realign a data 1765 * chain by nfs_realign(). 1766 * 1767 * The original chain is left intact. how should be M_WAITOK or M_NOWAIT 1768 * and NULL can be returned if M_NOWAIT is passed. 1769 * 1770 * Be careful to use cluster mbufs, a large mbuf chain converted to non 1771 * cluster mbufs can exhaust our supply of mbufs. 1772 */ 1773 struct mbuf * 1774 m_dup_data(struct mbuf *m, int how) 1775 { 1776 struct mbuf **p, *n, *top = NULL; 1777 int mlen, moff, chunk, gsize, nsize; 1778 1779 /* Degenerate case */ 1780 if (m == NULL) 1781 return (NULL); 1782 1783 /* 1784 * Optimize the mbuf allocation but do not get too carried away. 
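 * A single source mbuf no longer than MLEN is copied into plain mbufs;
 * otherwise, if the first mbuf carries a standard 2K cluster the copy uses
 * MCLBYTES clusters, and anything else falls back to MJUMPAGESIZE clusters.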
1785 */ 1786 if (m->m_next || m->m_len > MLEN) 1787 if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES) 1788 gsize = MCLBYTES; 1789 else 1790 gsize = MJUMPAGESIZE; 1791 else 1792 gsize = MLEN; 1793 1794 /* Chain control */ 1795 p = ⊤ 1796 n = NULL; 1797 nsize = 0; 1798 1799 /* 1800 * Scan the mbuf chain until nothing is left, the new mbuf chain 1801 * will be allocated on the fly as needed. 1802 */ 1803 while (m) { 1804 mlen = m->m_len; 1805 moff = 0; 1806 1807 while (mlen) { 1808 KKASSERT(m->m_type == MT_DATA); 1809 if (n == NULL) { 1810 n = m_getl(gsize, how, MT_DATA, 0, &nsize); 1811 if (n == NULL) 1812 goto nospace; 1813 n->m_len = 0; 1814 *p = n; 1815 p = &n->m_next; 1816 } 1817 chunk = imin(mlen, nsize); 1818 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk); 1819 mlen -= chunk; 1820 moff += chunk; 1821 n->m_len += chunk; 1822 nsize -= chunk; 1823 if (nsize == 0) 1824 n = NULL; 1825 } 1826 m = m->m_next; 1827 } 1828 *p = NULL; 1829 return(top); 1830 nospace: 1831 *p = NULL; 1832 m_freem(top); 1833 ++mbstat[mycpu->gd_cpuid].m_mcfail; 1834 return (NULL); 1835 } 1836 1837 /* 1838 * Concatenate mbuf chain n to m. 1839 * Both chains must be of the same type (e.g. MT_DATA). 1840 * Any m_pkthdr is not updated. 1841 */ 1842 void 1843 m_cat(struct mbuf *m, struct mbuf *n) 1844 { 1845 m = m_last(m); 1846 while (n) { 1847 if (m->m_flags & M_EXT || 1848 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { 1849 /* just join the two chains */ 1850 m->m_next = n; 1851 return; 1852 } 1853 /* splat the data from one into the other */ 1854 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, n->m_len); 1855 m->m_len += n->m_len; 1856 n = m_free(n); 1857 } 1858 } 1859 1860 void 1861 m_adj(struct mbuf *mp, int req_len) 1862 { 1863 struct mbuf *m; 1864 int count, len = req_len; 1865 1866 if ((m = mp) == NULL) 1867 return; 1868 if (len >= 0) { 1869 /* 1870 * Trim from head. 1871 */ 1872 while (m != NULL && len > 0) { 1873 if (m->m_len <= len) { 1874 len -= m->m_len; 1875 m->m_len = 0; 1876 m = m->m_next; 1877 } else { 1878 m->m_len -= len; 1879 m->m_data += len; 1880 len = 0; 1881 } 1882 } 1883 m = mp; 1884 if (mp->m_flags & M_PKTHDR) 1885 m->m_pkthdr.len -= (req_len - len); 1886 } else { 1887 /* 1888 * Trim from tail. Scan the mbuf chain, 1889 * calculating its length and finding the last mbuf. 1890 * If the adjustment only affects this mbuf, then just 1891 * adjust and return. Otherwise, rescan and truncate 1892 * after the remaining size. 1893 */ 1894 len = -len; 1895 count = 0; 1896 for (;;) { 1897 count += m->m_len; 1898 if (m->m_next == NULL) 1899 break; 1900 m = m->m_next; 1901 } 1902 if (m->m_len >= len) { 1903 m->m_len -= len; 1904 if (mp->m_flags & M_PKTHDR) 1905 mp->m_pkthdr.len -= len; 1906 return; 1907 } 1908 count -= len; 1909 if (count < 0) 1910 count = 0; 1911 /* 1912 * Correct length for chain is "count". 1913 * Find the mbuf with last data, adjust its length, 1914 * and toss data from remaining mbufs on chain. 1915 */ 1916 m = mp; 1917 if (m->m_flags & M_PKTHDR) 1918 m->m_pkthdr.len = count; 1919 for (; m; m = m->m_next) { 1920 if (m->m_len >= count) { 1921 m->m_len = count; 1922 break; 1923 } 1924 count -= m->m_len; 1925 } 1926 while ((m = m->m_next) != NULL) 1927 m->m_len = 0; 1928 } 1929 } 1930 1931 /* 1932 * Set the m_data pointer of a newly-allocated mbuf 1933 * to place an object of the specified size at the 1934 * end of the mbuf, longword aligned. 
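 *
 * Usage sketch (illustrative; "struct reply" is hypothetical): reserve the
 * object at the end of an empty mbuf so headers can later be prepended:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m_align(m, sizeof(struct reply));
 *	m->m_len = sizeof(struct reply);
 *
 * This must be done while the mbuf is still empty (m_data unmodified).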
1935 */ 1936 void 1937 m_align(struct mbuf *m, int len) 1938 { 1939 int adjust; 1940 1941 if (m->m_flags & M_EXT) 1942 adjust = m->m_ext.ext_size - len; 1943 else if (m->m_flags & M_PKTHDR) 1944 adjust = MHLEN - len; 1945 else 1946 adjust = MLEN - len; 1947 m->m_data += rounddown2(adjust, sizeof(long)); 1948 } 1949 1950 /* 1951 * Create a writable copy of the mbuf chain. While doing this 1952 * we compact the chain with a goal of producing a chain with 1953 * at most two mbufs. The second mbuf in this chain is likely 1954 * to be a cluster. The primary purpose of this work is to create 1955 * a writable packet for encryption, compression, etc. The 1956 * secondary goal is to linearize the data so the data can be 1957 * passed to crypto hardware in the most efficient manner possible. 1958 */ 1959 struct mbuf * 1960 m_unshare(struct mbuf *m0, int how) 1961 { 1962 struct mbuf *m, *mprev; 1963 struct mbuf *n, *mfirst, *mlast; 1964 int len, off; 1965 1966 mprev = NULL; 1967 for (m = m0; m != NULL; m = mprev->m_next) { 1968 /* 1969 * Regular mbufs are ignored unless there's a cluster 1970 * in front of it that we can use to coalesce. We do 1971 * the latter mainly so later clusters can be coalesced 1972 * also w/o having to handle them specially (i.e. convert 1973 * mbuf+cluster -> cluster). This optimization is heavily 1974 * influenced by the assumption that we're running over 1975 * Ethernet where MCLBYTES is large enough that the max 1976 * packet size will permit lots of coalescing into a 1977 * single cluster. This in turn permits efficient 1978 * crypto operations, especially when using hardware. 1979 */ 1980 if ((m->m_flags & M_EXT) == 0) { 1981 if (mprev != NULL && (mprev->m_flags & M_EXT) && 1982 m->m_len <= M_TRAILINGSPACE(mprev)) { 1983 /* XXX: this ignores mbuf types */ 1984 memcpy(mtod(mprev, caddr_t) + mprev->m_len, 1985 mtod(m, caddr_t), m->m_len); 1986 mprev->m_len += m->m_len; 1987 /* unlink from chain and reclaim */ 1988 mprev->m_next = m->m_next; 1989 m_free(m); 1990 } else { 1991 mprev = m; 1992 } 1993 continue; 1994 } 1995 /* 1996 * Writable mbufs are left alone (for now). 1997 */ 1998 if (M_WRITABLE(m)) { 1999 mprev = m; 2000 continue; 2001 } 2002 2003 /* 2004 * Not writable, replace with a copy or coalesce with 2005 * the previous mbuf if possible (since we have to copy 2006 * it anyway, we try to reduce the number of mbufs and 2007 * clusters so that future work is easier). 2008 */ 2009 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags)); 2010 /* NB: we only coalesce into a cluster or larger */ 2011 if (mprev != NULL && (mprev->m_flags & M_EXT) && 2012 m->m_len <= M_TRAILINGSPACE(mprev)) { 2013 /* XXX: this ignores mbuf types */ 2014 memcpy(mtod(mprev, caddr_t) + mprev->m_len, 2015 mtod(m, caddr_t), m->m_len); 2016 mprev->m_len += m->m_len; 2017 /* unlink from chain and reclaim */ 2018 mprev->m_next = m->m_next; 2019 m_free(m); 2020 continue; 2021 } 2022 2023 /* 2024 * Allocate new space to hold the copy... 2025 */ 2026 /* XXX why can M_PKTHDR be set past the first mbuf? */ 2027 if (mprev == NULL && (m->m_flags & M_PKTHDR)) { 2028 /* 2029 * NB: if a packet header is present we must 2030 * allocate the mbuf separately from any cluster 2031 * because M_MOVE_PKTHDR will smash the data 2032 * pointer and drop the M_EXT marker. 
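 * So the code below does MGETHDR first, moves the packet header across,
 * and only then attaches a cluster with MCLGET.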
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev != NULL && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				/* unlink from chain and reclaim */
				mprev->m_next = m->m_next;
				m_free(m);
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			/* unlink from chain and reclaim */
			mprev->m_next = m->m_next;
			m_free(m);
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, how, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(M_NOWAIT, n->m_type);
		else
			m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
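/*
 * Editor's illustrative usage sketch (compiled out): the canonical
 * m_pullup() pattern, making the first "hdrlen" bytes of a received
 * chain contiguous before the caller casts m_data to a header
 * structure.  "hdrlen" is a caller-supplied assumption and must not
 * exceed MHLEN.
 */
#if 0
static struct mbuf *
example_m_pullup(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen) {
		m = m_pullup(m, hdrlen);
		if (m == NULL)
			return (NULL);	/* chain was freed by m_pullup() */
	}
	/* mtod(m, ...) now covers at least hdrlen contiguous bytes */
	return (m);
}
#endif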
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(void *_buf, int len, int offset __unused, struct ifnet *ifp)
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	caddr_t buf = _buf;
	int nsize, flags;

	KKASSERT(offset == 0);
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, M_NOWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		bcopy(buf, m->m_data, m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}
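/*
 * Editor's illustrative usage sketch (compiled out): a driver-style
 * receive path that copies a DMA buffer into a fresh mbuf chain with
 * m_devget().  "rxbuf", "rxlen" and "ifp" stand in for whatever the
 * real driver provides; they are assumptions for the example.
 */
#if 0
static struct mbuf *
example_m_devget(void *rxbuf, int rxlen, struct ifnet *ifp)
{
	struct mbuf *m;

	m = m_devget(rxbuf, rxlen, 0, ifp);	/* offset is always 0 */
	if (m == NULL)
		return (NULL);			/* allocation failed */
	/* m is a complete M_PKTHDR chain holding rxlen bytes */
	return (m);
}
#endif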
/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, caddr_t) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 *
 * Note that m0->m_len may be 0 (e.g., a newly allocated mbuf).
 */
static __inline int
_m_copyback2(struct mbuf *m0, int off, int len, const void *_cp, int how,
    boolean_t allow_alloc)
{
	struct mbuf *m = m0, *n;
	c_caddr_t cp = _cp;
	int mlen, tlen, nsize, totlen = 0, error = ENOBUFS;

	KASSERT(off >= 0, ("%s: negative off %d", __func__, off));
	KASSERT(len >= 0, ("%s: negative len %d", __func__, len));

	if (m0 == NULL)
		return (0);

	while (off > m->m_len) {
		if (m->m_next == NULL && (tlen = M_TRAILINGSPACE(m)) > 0) {
			/* Use the trailing space of the last mbuf. */
			mlen = min(off - m->m_len, tlen);
			bzero(mtod(m, caddr_t) + m->m_len, mlen);
			m->m_len += mlen;
		}
		off -= m->m_len;
		totlen += m->m_len;
		if (m->m_next == NULL) {
			if (!allow_alloc)
				goto out;
			n = m_getl(off + len, how, m->m_type, 0, &nsize);
			if (n == NULL)
				goto out;
			n->m_len = min(nsize, off + len);
			bzero(mtod(n, void *), n->m_len);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL &&
		    m->m_len < off + len &&
		    (tlen = M_TRAILINGSPACE(m)) > 0) {
			/* Use the trailing space of the last mbuf. */
			m->m_len += min(off + len - m->m_len, tlen);
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, mtod(m, caddr_t) + off, mlen);
		off = 0;
		cp += mlen;
		len -= mlen;
		totlen += mlen + off;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			if (!allow_alloc)
				goto out;
			n = m_getl(len, how, m->m_type, 0, &nsize);
			if (n == NULL)
				goto out;
			n->m_len = min(nsize, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
	error = 0;

out:
	if ((m0->m_flags & M_PKTHDR) && (m0->m_pkthdr.len < totlen))
		m0->m_pkthdr.len = totlen;

	return (error);
}

int
m_copyback2(struct mbuf *m0, int off, int len, const void *cp, int how)
{
	return _m_copyback2(m0, off, len, cp, how, TRUE);
}

/*
 * Similar to m_copyback2() but forbid mbuf expansion.  The caller must
 * ensure that the mbuf (chain) is big enough; otherwise, the copyback
 * would fail with diagnostics printed to the console.
 */
void
m_copyback(struct mbuf *m0, int off, int len, const void *cp)
{
	if (_m_copyback2(m0, off, len, cp, 0, FALSE) != 0) {
		kprintf("%s: unexpected mbuf expansion required, "
		    "code path needs to be fixed:\n", __func__);
		print_backtrace(8);
	}
}
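/*
 * Editor's illustrative usage sketch (compiled out): patch a 2-byte
 * field at a caller-supplied offset into a packet with m_copyback2(),
 * allowing the chain to grow if needed.  The offset and the zero field
 * value are assumptions for the example.
 */
#if 0
static int
example_m_copyback2(struct mbuf *m, int off)
{
	uint16_t val = 0;

	/* returns 0 on success, ENOBUFS if expansion failed */
	return (m_copyback2(m, off, sizeof(val), &val, M_NOWAIT));
}
#endif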
/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, const void *_cp)
{
	struct mbuf *m, *n;
	c_caddr_t cp = _cp;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("%s: negative off %d", __func__, off));
	KASSERT(len >= 0, ("%s: negative len %d", __func__, len));
	while (off > 0) {
		KASSERT(m != NULL,
		    ("%s: offset > size of mbuf chain", __func__));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL,
		    ("%s: offset > size of mbuf chain", __func__));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;
	char *hexstr;

	len = m->m_pkthdr.len;
	m2 = m;
	hexstr = kmalloc(HEX_NCPYLEN(len), M_TEMP, M_ZERO | M_WAITOK);
	while (len) {
		kprintf("%p %s\n", m2, hexncpy(m2->m_data, m2->m_len, hexstr,
		    HEX_NCPYLEN(m2->m_len), "-"));
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	kfree(hexstr, M_TEMP);
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("%s: not packet header", __func__));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}
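/*
 * Editor's illustrative usage sketch (compiled out): use m_apply() to
 * walk "len" bytes of a chain without linearizing it.  The callback
 * here only counts bytes; a real caller might checksum or hash each
 * contiguous segment instead.  The function names are hypothetical.
 */
#if 0
static int
example_apply_cb(void *arg, void *data, u_int count)
{
	u_int *total = arg;

	*total += count;	/* "data" points at count contiguous bytes */
	return (0);		/* non-zero would abort the walk */
}

static u_int
example_m_apply(struct mbuf *m, int off, int len)
{
	u_int total = 0;

	m_apply(m, off, len, example_apply_cb, &total);
	return (total);
}
#endif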
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("%s: not packet header", __func__));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, void *));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
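/*
 * Editor's illustrative usage sketch (compiled out): a transmit-path
 * fallback that defragments a long chain when a hypothetical DMA
 * engine supports only a limited number of segments.  "maxsegs" is an
 * assumption for the example.
 */
#if 0
static struct mbuf *
example_m_defrag(struct mbuf *m, int maxsegs)
{
	struct mbuf *n;
	int nsegs = 0;

	for (n = m; n != NULL; n = n->m_next)
		++nsegs;
	if (nsegs <= maxsegs)
		return (m);
	n = m_defrag(m, M_NOWAIT);	/* frees "m" on success */
	return (n);			/* NULL means "m" is untouched */
}
#endif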
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, M_WAITOK, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}
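/*
 * Editor's illustrative usage sketch (compiled out): sanity-check a
 * packet chain by comparing the recorded packet-header length with the
 * sum of the per-mbuf lengths, and report the storage consumed via
 * m_countm().  "storagep" must point to valid storage; the function
 * name and error convention are assumptions for the example.
 */
#if 0
static int
example_m_countm(struct mbuf *m, u_int *storagep)
{
	u_int len;

	len = m_countm(m, NULL, storagep);
	if ((m->m_flags & M_PKTHDR) && (u_int)m->m_pkthdr.len != len)
		return (EINVAL);	/* recorded length is inconsistent */
	return (0);
}
#endif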