/*
 * (MPSAFE)
 *
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/globaldata.h>

#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int	trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin =
	SPINLOCK_INITIALIZER(mbuf_track_spin, "mbuf_track_spin");

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL);

struct mbtypes_stat {
	u_long	stats[MT_NTYPES];
} __cachealign;

static struct mbtypes_stat	mbtypes[SMP_MAXCPU];

static struct mbstat mbstat[SMP_MAXCPU] __cachealign;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

struct lock	mbupdate_lk = LOCK_INITIALIZER("mbupdate", 0, LK_CANRECURSE);

int		nmbclusters;
static int	nmbjclusters;
int		nmbufs;

static int	mjclph_cachefrac;
static int	mjcl_cachefrac;
static int	mclph_cachefrac;
static int	mcl_cachefrac;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_jclusters += mbstat[i].m_jclusters;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return (sysctl_handle_opaque(oidp, mbstat_totalp,
				     sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i].stats[j];
	}

	return (sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * The variables may be set as boot-time tunables or live.  Setting these
 * values too low can deadlock your network.  Network interfaces may also
 * adjust nmbclusters and/or nmbjclusters to account for preloading the
 * hardware rings.
 */
static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbclusters, "I",
	    "Maximum number of mbuf clusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbjclusters, "I",
	    "Maximum number of mbuf jclusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbufs, "I",
	    "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, mjclph_cachefrac, CTLFLAG_RD,
	   &mjclph_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mjcl_cachefrac, CTLFLAG_RD,
	   &mjcl_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters");
SYSCTL_INT(_kern_ipc, OID_AUTO, mclph_cachefrac, CTLFLAG_RD,
	   &mclph_cachefrac, 0,
	   "Fraction of cacheable mbuf clusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_cachefrac, CTLFLAG_RD,
	   &mcl_cachefrac, 0, "Fraction of cacheable mbuf clusters");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0,
	   "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0,
	   "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
static void m_mjclfree(void *arg);

static void mbupdatelimits(void);

/*
 * Generally scale default mbufs to maxproc.
 *
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxproc * 4)
#endif
#ifndef BASE_CACHEFRAC
#define BASE_CACHEFRAC	16
#endif
#ifndef MJCLPH_CACHEFRAC
#define MJCLPH_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef MJCL_CACHEFRAC
#define MJCL_CACHEFRAC		(BASE_CACHEFRAC * 2)
#endif
#ifndef MCLPH_CACHEFRAC
#define MCLPH_CACHEFRAC		(BASE_CACHEFRAC * 2)
#endif
#ifndef MCL_CACHEFRAC
#define MCL_CACHEFRAC		(BASE_CACHEFRAC * 2)
#endif
#ifndef NMBJCLUSTERS
#define NMBJCLUSTERS	(NMBCLUSTERS / 4)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters / 2 + maxfiles)
#endif

#define NMBCLUSTERS_MIN		(NMBCLUSTERS / 2)
#define NMBJCLUSTERS_MIN	(NMBJCLUSTERS / 2)
#define NMBUFS_MIN		(NMBUFS / 2)

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	mjclph_cachefrac = MJCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjclph_cachefrac", &mjclph_cachefrac);
	mjcl_cachefrac = MJCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjcl_cachefrac", &mjcl_cachefrac);
	mclph_cachefrac = MCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mclph_cachefrac", &mclph_cachefrac);
	mcl_cachefrac = MCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mcl_cachefrac", &mcl_cachefrac);

	/*
	 * WARNING! each mcl cache feeds two mbuf caches, so the minimum
	 *	    cachefrac is 2.  For safety, use 3.
	 */
	if (mjclph_cachefrac < 3)
		mjclph_cachefrac = 3;
	if (mjcl_cachefrac < 3)
		mjcl_cachefrac = 3;
	if (mclph_cachefrac < 3)
		mclph_cachefrac = 3;
	if (mcl_cachefrac < 3)
		mcl_cachefrac = 3;

	nmbjclusters = NMBJCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters);

	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

static void
mbinclimit(int *limit, int inc, int minlim)
{
	int new_limit;

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);

	new_limit = *limit + inc;
	if (new_limit < minlim)
		new_limit = minlim;

	if (*limit != new_limit) {
		*limit = new_limit;
		mbupdatelimits();
	}

	lockmgr(&mbupdate_lk, LK_RELEASE);
}

static int
mbsetlimit(int *limit, int new_limit, int minlim)
{
	if (new_limit < minlim)
		return EINVAL;

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);
	mbinclimit(limit, new_limit - *limit, minlim);
	lockmgr(&mbupdate_lk, LK_RELEASE);
	return 0;
}

static int
sysctl_mblimit(SYSCTL_HANDLER_ARGS, int *limit, int minlim)
{
	int error, value;

	value = *limit;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return error;

	return mbsetlimit(limit, value, minlim);
}
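
/*
 * Illustrative note (not part of the original source; values are only
 * examples): the limits above are normally adjusted through the loader
 * tunables and sysctls defined in this file rather than by editing the
 * defaults, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbjclusters="16384"
 *
 * or at run time, where values below the *_MIN floors are rejected
 * with EINVAL by mbsetlimit():
 *
 *	sysctl kern.ipc.nmbclusters=65536
 */
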
/*
 * Sysctl support to update nmbclusters, nmbjclusters, and nmbufs.
 */
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbclusters,
	    NMBCLUSTERS_MIN);
}

static int
sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbjclusters,
	    NMBJCLUSTERS_MIN);
}

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbufs, NMBUFS_MIN);
}

void
mcl_inclimit(int inc)
{
	mbinclimit(&nmbclusters, inc, NMBCLUSTERS_MIN);
}

void
mjcl_inclimit(int inc)
{
	mbinclimit(&nmbjclusters, inc, NMBJCLUSTERS_MIN);
}

void
mb_inclimit(int inc)
{
	mbinclimit(&nmbufs, inc, NMBUFS_MIN);
}

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	if (size != MCLBYTES)
		m->m_ext.ext_free = m_mjclfree;
	else
		m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		mbstat[i].m_msize = MSIZE;
		mbstat[i].m_mclbytes = MCLBYTES;
		mbstat[i].m_mjumpagesize = MJUMPAGESIZE;
		mbstat[i].m_minclsize = MINCLSIZE;
		mbstat[i].m_mlen = MLEN;
		mbstat[i].m_mhlen = MHLEN;
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    limit, nmbufs / BASE_CACHEFRAC,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkthdr",
	    limit, nmbufs / BASE_CACHEFRAC,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	mclmeta_cache = objcache_create("mbuf cluster",
	    ncl_limit, nmbclusters / BASE_CACHEFRAC,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	mjclmeta_cache = objcache_create("mbuf jcluster",
	    jcl_limit, nmbjclusters / BASE_CACHEFRAC,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf+cl",
	    limit, nmbclusters / mcl_cachefrac,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkthdr+cl",
	    limit, nmbclusters / mclph_cachefrac,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufjcluster_cache = objcache_create("mbuf+jcl",
	    limit, nmbjclusters / mjcl_cachefrac,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkthdr+jcl",
	    limit, nmbjclusters / mjclph_cachefrac,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 *	 of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}

/*
 * Adjust mbuf limits after changes have been made
 *
 * Caller must hold mbupdate_lk
 */
static void
mbupdatelimits(void)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;

	KASSERT(lockstatus(&mbupdate_lk, curthread) != 0,
	    ("mbupdate_lk is not held"));

	/*
	 * Figure out adjustments to object caches after nmbufs, nmbclusters,
	 * or nmbjclusters has been modified.
	 */
	mb_limit = cl_limit = 0;

	limit = nmbufs;
	objcache_set_cluster_limit(mbuf_cache, limit);
	mb_limit += limit;

	limit = nmbufs;
	objcache_set_cluster_limit(mbufphdr_cache, limit);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	objcache_set_cluster_limit(mclmeta_cache, ncl_limit);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	objcache_set_cluster_limit(mjclmeta_cache, jcl_limit);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufcluster_cache, limit);
	mb_limit += limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufphdrcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufjcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufphdrjcluster_cache, limit);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 *	 of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));
	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	++mbtypes[gd->gd_cpuid].stats[type];
	--mbtypes[gd->gd_cpuid].stats[m->m_type];
	m->m_type = type;
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	++mbstat[mycpu->gd_cpuid].m_drain;
}

static __inline void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	++mbtypes[gd->gd_cpuid].stats[type];
	++mbstat[gd->gd_cpuid].m_mbufs;
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MB_OCFLAG(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}
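
/*
 * Illustrative caller sketch (not part of the original source; the caller
 * code is only an example): even with M_WAITOK the allocators above retry
 * the reclaim path only once and may still return NULL, so callers must
 * always check the result, e.g.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */
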
/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

static struct mbuf *
m_getcl_cache(int how, short type, int flags, struct objcache *mbclc,
	      struct objcache *mbphclc, u_long *cl_stats)
{
	struct mbuf *m = NULL;
	int ocflags = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((ocflags & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}

#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	++mbtypes[mycpu->gd_cpuid].stats[type];
	++(*cl_stats);
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct objcache *mbclc, *mbphclc;
	u_long *cl_stats;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_clusters;
		break;

	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_jclusters;
		break;
	}
	return m_getcl_cache(how, type, flags, mbclc, mbphclc, cl_stats);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return m_getcl_cache(how, type, flags,
			     mbufcluster_cache, mbufphdrcluster_cache,
			     &mbstat[mycpu->gd_cpuid].m_clusters);
}

/*
 * Allocate a chain of mbufs covering the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}
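
/*
 * Illustrative sketch (not part of the original source): m_getc() returns
 * a chain of zero-length mbufs/clusters whose buffer space covers at least
 * "len" bytes; the caller fills the data and sets m_len on each mbuf, e.g.
 *
 *	struct mbuf *n = m_getc(len, M_WAITOK, MT_DATA);
 *
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	... copy len bytes into the chain, setting m_len as each
 *	    mbuf is filled ...
 */
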
/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MB_OCFLAG(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		++mbstat[mycpu->gd_cpuid].m_clusters;
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_clusters;
		objcache_put(mclmeta_cache, mcl);
	}
}

static void
m_mjclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_jclusters;
		objcache_put(mjclmeta_cache, mcl);
	}
}

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	--mbtypes[gd->gd_cpuid].stats[m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT &&
			    m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_jclusters;
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_clusters;
			}
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		--mbstat[mycpu->gd_cpuid].m_mbufs;
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif

void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
				("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			      (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
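
/*
 * Illustrative note (not part of the original source): because m_copym()
 * and m_copypacket() share clusters instead of copying them, a caller that
 * intends to modify the copied data should either take a deep copy with
 * m_dup() or check writability first, e.g.
 *
 *	struct mbuf *n = m_copypacket(m, M_NOWAIT);
 *
 *	if (n != NULL && !M_WRITABLE(n))
 *		... treat n as read-only, or use m_dup()/m_unshare() ...
 */
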
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be M_WAITOK or M_NOWAIT
 * and NULL can be returned if M_NOWAIT is passed.
 *
 * Be careful to use cluster mbufs, a large mbuf chain converted to non
 * cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN) {
		if ((m->m_flags & M_EXT) && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	} else {
		gsize = MLEN;
	}

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return(top);
nospace:
	*p = NULL;
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		      (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
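
/*
 * Illustrative sketch (not part of the original source; the header/CRC
 * lengths are only examples): m_adj() trims from the front of the chain
 * for positive lengths and from the tail for negative lengths, updating
 * m_pkthdr.len when present, e.g.
 *
 *	m_adj(m, sizeof(struct ether_header));	strip a leading header
 *	m_adj(m, -ETHER_CRC_LEN);		drop a trailing CRC
 */
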
/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += rounddown2(adjust, sizeof(long));
}

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next; /* unlink from chain */
				m_free(m);		   /* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next; /* unlink from chain */
			m_free(m);		   /* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
2044 */ 2045 MGETHDR(n, how, m->m_type); 2046 if (n == NULL) { 2047 m_freem(m0); 2048 return (NULL); 2049 } 2050 M_MOVE_PKTHDR(n, m); 2051 MCLGET(n, how); 2052 if ((n->m_flags & M_EXT) == 0) { 2053 m_free(n); 2054 m_freem(m0); 2055 return (NULL); 2056 } 2057 } else { 2058 n = m_getcl(how, m->m_type, m->m_flags); 2059 if (n == NULL) { 2060 m_freem(m0); 2061 return (NULL); 2062 } 2063 } 2064 /* 2065 * ... and copy the data. We deal with jumbo mbufs 2066 * (i.e. m_len > MCLBYTES) by splitting them into 2067 * clusters. We could just malloc a buffer and make 2068 * it external but too many device drivers don't know 2069 * how to break up the non-contiguous memory when 2070 * doing DMA. 2071 */ 2072 len = m->m_len; 2073 off = 0; 2074 mfirst = n; 2075 mlast = NULL; 2076 for (;;) { 2077 int cc = min(len, MCLBYTES); 2078 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc); 2079 n->m_len = cc; 2080 if (mlast != NULL) 2081 mlast->m_next = n; 2082 mlast = n; 2083 2084 len -= cc; 2085 if (len <= 0) 2086 break; 2087 off += cc; 2088 2089 n = m_getcl(how, m->m_type, m->m_flags); 2090 if (n == NULL) { 2091 m_freem(mfirst); 2092 m_freem(m0); 2093 return (NULL); 2094 } 2095 } 2096 n->m_next = m->m_next; 2097 if (mprev == NULL) 2098 m0 = mfirst; /* new head of chain */ 2099 else 2100 mprev->m_next = mfirst; /* replace old mbuf */ 2101 m_free(m); /* release old mbuf */ 2102 mprev = mfirst; 2103 } 2104 return (m0); 2105 } 2106 2107 /* 2108 * Rearrange an mbuf chain so that len bytes are contiguous 2109 * and in the data area of an mbuf (so that mtod will work for a structure 2110 * of size len). Returns the resulting mbuf chain on success, frees it and 2111 * returns null on failure. If there is room, it will add up to 2112 * max_protohdr-len extra bytes to the contiguous region in an attempt to 2113 * avoid being called next time. 2114 */ 2115 struct mbuf * 2116 m_pullup(struct mbuf *n, int len) 2117 { 2118 struct mbuf *m; 2119 int count; 2120 int space; 2121 2122 /* 2123 * If first mbuf has no cluster, and has room for len bytes 2124 * without shifting current data, pullup into it, 2125 * otherwise allocate a new mbuf to prepend to the chain. 2126 */ 2127 if (!(n->m_flags & M_EXT) && 2128 n->m_data + len < &n->m_dat[MLEN] && 2129 n->m_next) { 2130 if (n->m_len >= len) 2131 return (n); 2132 m = n; 2133 n = n->m_next; 2134 len -= m->m_len; 2135 } else { 2136 if (len > MHLEN) 2137 goto bad; 2138 if (n->m_flags & M_PKTHDR) 2139 m = m_gethdr(M_NOWAIT, n->m_type); 2140 else 2141 m = m_get(M_NOWAIT, n->m_type); 2142 if (m == NULL) 2143 goto bad; 2144 m->m_len = 0; 2145 if (n->m_flags & M_PKTHDR) 2146 M_MOVE_PKTHDR(m, n); 2147 } 2148 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 2149 do { 2150 count = min(min(max(len, max_protohdr), space), n->m_len); 2151 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 2152 (unsigned)count); 2153 len -= count; 2154 m->m_len += count; 2155 n->m_len -= count; 2156 space -= count; 2157 if (n->m_len) 2158 n->m_data += count; 2159 else 2160 n = m_free(n); 2161 } while (len > 0 && n); 2162 if (len > 0) { 2163 m_free(m); 2164 goto bad; 2165 } 2166 m->m_next = n; 2167 return (m); 2168 bad: 2169 m_freem(n); 2170 ++mbstat[mycpu->gd_cpuid].m_mpfail; 2171 return (NULL); 2172 } 2173 2174 /* 2175 * Partition an mbuf chain in two pieces, returning the tail -- 2176 * all but the first len0 bytes. In case of failure, it returns NULL and 2177 * attempts to restore the chain to its original state.
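 *
 * Illustrative use only (the 20-byte split point is just an example
 * value): keep the first 20 bytes of a packet in "m" and obtain the rest
 * as a separate chain:
 *
 *	tail = m_split(m, 20, M_NOWAIT);
 *
 * A NULL "tail" means the allocation failed and "m" was left unchanged.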
2178 * 2179 * Note that the resulting mbufs might be read-only, because the new 2180 * mbuf can end up sharing an mbuf cluster with the original mbuf if 2181 * the "breaking point" happens to lie within a cluster mbuf. Use the 2182 * M_WRITABLE() macro to check for this case. 2183 */ 2184 struct mbuf * 2185 m_split(struct mbuf *m0, int len0, int wait) 2186 { 2187 struct mbuf *m, *n; 2188 unsigned len = len0, remain; 2189 2190 for (m = m0; m && len > m->m_len; m = m->m_next) 2191 len -= m->m_len; 2192 if (m == NULL) 2193 return (NULL); 2194 remain = m->m_len - len; 2195 if (m0->m_flags & M_PKTHDR) { 2196 n = m_gethdr(wait, m0->m_type); 2197 if (n == NULL) 2198 return (NULL); 2199 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 2200 n->m_pkthdr.len = m0->m_pkthdr.len - len0; 2201 m0->m_pkthdr.len = len0; 2202 if (m->m_flags & M_EXT) 2203 goto extpacket; 2204 if (remain > MHLEN) { 2205 /* m can't be the lead packet */ 2206 MH_ALIGN(n, 0); 2207 n->m_next = m_split(m, len, wait); 2208 if (n->m_next == NULL) { 2209 m_free(n); 2210 return (NULL); 2211 } else { 2212 n->m_len = 0; 2213 return (n); 2214 } 2215 } else 2216 MH_ALIGN(n, remain); 2217 } else if (remain == 0) { 2218 n = m->m_next; 2219 m->m_next = NULL; 2220 return (n); 2221 } else { 2222 n = m_get(wait, m->m_type); 2223 if (n == NULL) 2224 return (NULL); 2225 M_ALIGN(n, remain); 2226 } 2227 extpacket: 2228 if (m->m_flags & M_EXT) { 2229 KKASSERT((n->m_flags & M_EXT) == 0); 2230 n->m_data = m->m_data + len; 2231 m->m_ext.ext_ref(m->m_ext.ext_arg); 2232 n->m_ext = m->m_ext; 2233 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER); 2234 } else { 2235 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 2236 } 2237 n->m_len = remain; 2238 m->m_len = len; 2239 n->m_next = m->m_next; 2240 m->m_next = NULL; 2241 return (n); 2242 } 2243 2244 /* 2245 * Routine to copy from device local memory into mbufs. 2246 * Note: "offset" is ill-defined and always called as 0, so ignore it. 2247 */ 2248 struct mbuf * 2249 m_devget(char *buf, int len, int offset, struct ifnet *ifp) 2250 { 2251 struct mbuf *m, *mfirst = NULL, **mtail; 2252 int nsize, flags; 2253 2254 mtail = &mfirst; 2255 flags = M_PKTHDR; 2256 2257 while (len > 0) { 2258 m = m_getl(len, M_NOWAIT, MT_DATA, flags, &nsize); 2259 if (m == NULL) { 2260 m_freem(mfirst); 2261 return (NULL); 2262 } 2263 m->m_len = min(len, nsize); 2264 2265 if (flags & M_PKTHDR) { 2266 if (len + max_linkhdr <= nsize) 2267 m->m_data += max_linkhdr; 2268 m->m_pkthdr.rcvif = ifp; 2269 m->m_pkthdr.len = len; 2270 flags = 0; 2271 } 2272 2273 bcopy(buf, m->m_data, (unsigned)m->m_len); 2274 buf += m->m_len; 2275 len -= m->m_len; 2276 *mtail = m; 2277 mtail = &m->m_next; 2278 } 2279 2280 return (mfirst); 2281 } 2282 2283 /* 2284 * Routine to pad mbuf to the specified length 'padto'. 2285 */ 2286 int 2287 m_devpad(struct mbuf *m, int padto) 2288 { 2289 struct mbuf *last = NULL; 2290 int padlen; 2291 2292 if (padto <= m->m_pkthdr.len) 2293 return 0; 2294 2295 padlen = padto - m->m_pkthdr.len; 2296 2297 /* if there's only the packet-header and we can pad there, use it. */ 2298 if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) { 2299 last = m; 2300 } else { 2301 /* 2302 * Walk packet chain to find last mbuf. We will either 2303 * pad there, or append a new mbuf and pad it 2304 */ 2305 for (last = m; last->m_next != NULL; last = last->m_next) 2306 ; /* EMPTY */ 2307 2308 /* `last' now points to last in chain. */ 2309 if (M_TRAILINGSPACE(last) < padlen) { 2310 struct mbuf *n; 2311 2312 /* Allocate new empty mbuf, pad it. 
Compact later. */ 2313 MGET(n, M_NOWAIT, MT_DATA); 2314 if (n == NULL) 2315 return ENOBUFS; 2316 n->m_len = 0; 2317 last->m_next = n; 2318 last = n; 2319 } 2320 } 2321 KKASSERT(M_TRAILINGSPACE(last) >= padlen); 2322 KKASSERT(M_WRITABLE(last)); 2323 2324 /* Now zero the pad area */ 2325 bzero(mtod(last, char *) + last->m_len, padlen); 2326 last->m_len += padlen; 2327 m->m_pkthdr.len += padlen; 2328 return 0; 2329 } 2330 2331 /* 2332 * Copy data from a buffer back into the indicated mbuf chain, 2333 * starting "off" bytes from the beginning, extending the mbuf 2334 * chain if necessary. 2335 */ 2336 void 2337 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp) 2338 { 2339 int mlen; 2340 struct mbuf *m = m0, *n; 2341 int totlen = 0; 2342 2343 if (m0 == NULL) 2344 return; 2345 while (off > (mlen = m->m_len)) { 2346 off -= mlen; 2347 totlen += mlen; 2348 if (m->m_next == NULL) { 2349 n = m_getclr(M_NOWAIT, m->m_type); 2350 if (n == NULL) 2351 goto out; 2352 n->m_len = min(MLEN, len + off); 2353 m->m_next = n; 2354 } 2355 m = m->m_next; 2356 } 2357 while (len > 0) { 2358 mlen = min (m->m_len - off, len); 2359 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen); 2360 cp += mlen; 2361 len -= mlen; 2362 mlen += off; 2363 off = 0; 2364 totlen += mlen; 2365 if (len == 0) 2366 break; 2367 if (m->m_next == NULL) { 2368 n = m_get(M_NOWAIT, m->m_type); 2369 if (n == NULL) 2370 break; 2371 n->m_len = min(MLEN, len); 2372 m->m_next = n; 2373 } 2374 m = m->m_next; 2375 } 2376 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) 2377 m->m_pkthdr.len = totlen; 2378 } 2379 2380 /* 2381 * Append the specified data to the indicated mbuf chain, 2382 * Extend the mbuf chain if the new data does not fit in 2383 * existing space. 2384 * 2385 * Return 1 if able to complete the job; otherwise 0. 2386 */ 2387 int 2388 m_append(struct mbuf *m0, int len, c_caddr_t cp) 2389 { 2390 struct mbuf *m, *n; 2391 int remainder, space; 2392 2393 for (m = m0; m->m_next != NULL; m = m->m_next) 2394 ; 2395 remainder = len; 2396 space = M_TRAILINGSPACE(m); 2397 if (space > 0) { 2398 /* 2399 * Copy into available space. 2400 */ 2401 if (space > remainder) 2402 space = remainder; 2403 bcopy(cp, mtod(m, caddr_t) + m->m_len, space); 2404 m->m_len += space; 2405 cp += space, remainder -= space; 2406 } 2407 while (remainder > 0) { 2408 /* 2409 * Allocate a new mbuf; could check space 2410 * and allocate a cluster instead. 2411 */ 2412 n = m_get(M_NOWAIT, m->m_type); 2413 if (n == NULL) 2414 break; 2415 n->m_len = min(MLEN, remainder); 2416 bcopy(cp, mtod(n, caddr_t), n->m_len); 2417 cp += n->m_len, remainder -= n->m_len; 2418 m->m_next = n; 2419 m = n; 2420 } 2421 if (m0->m_flags & M_PKTHDR) 2422 m0->m_pkthdr.len += len - remainder; 2423 return (remainder == 0); 2424 } 2425 2426 /* 2427 * Apply function f to the data in an mbuf chain starting "off" bytes from 2428 * the beginning, continuing for "len" bytes. 
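 *
 * The function receives (arg, data, length) for each contiguous piece;
 * a non-zero return value aborts the walk and is passed back to the
 * caller.  A minimal illustrative callback (not part of this file) that
 * sums every byte of a packet might look like:
 *
 *	static int
 *	example_sum(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sum = arg;
 *		const uint8_t *p = data;
 *
 *		while (len--)
 *			*sum += *p++;
 *		return (0);
 *	}
 *
 *	uint32_t sum = 0;
 *	m_apply(m, 0, m->m_pkthdr.len, example_sum, &sum);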
2429 */ 2430 int 2431 m_apply(struct mbuf *m, int off, int len, 2432 int (*f)(void *, void *, u_int), void *arg) 2433 { 2434 u_int count; 2435 int rval; 2436 2437 KASSERT(off >= 0, ("m_apply, negative off %d", off)); 2438 KASSERT(len >= 0, ("m_apply, negative len %d", len)); 2439 while (off > 0) { 2440 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 2441 if (off < m->m_len) 2442 break; 2443 off -= m->m_len; 2444 m = m->m_next; 2445 } 2446 while (len > 0) { 2447 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 2448 count = min(m->m_len - off, len); 2449 rval = (*f)(arg, mtod(m, caddr_t) + off, count); 2450 if (rval) 2451 return (rval); 2452 len -= count; 2453 off = 0; 2454 m = m->m_next; 2455 } 2456 return (0); 2457 } 2458 2459 /* 2460 * Return a pointer to mbuf/offset of location in mbuf chain. 2461 */ 2462 struct mbuf * 2463 m_getptr(struct mbuf *m, int loc, int *off) 2464 { 2465 2466 while (loc >= 0) { 2467 /* Normal end of search. */ 2468 if (m->m_len > loc) { 2469 *off = loc; 2470 return (m); 2471 } else { 2472 loc -= m->m_len; 2473 if (m->m_next == NULL) { 2474 if (loc == 0) { 2475 /* Point at the end of valid data. */ 2476 *off = m->m_len; 2477 return (m); 2478 } 2479 return (NULL); 2480 } 2481 m = m->m_next; 2482 } 2483 } 2484 return (NULL); 2485 } 2486 2487 void 2488 m_print(const struct mbuf *m) 2489 { 2490 int len; 2491 const struct mbuf *m2; 2492 char *hexstr; 2493 2494 len = m->m_pkthdr.len; 2495 m2 = m; 2496 hexstr = kmalloc(HEX_NCPYLEN(len), M_TEMP, M_ZERO | M_WAITOK); 2497 while (len) { 2498 kprintf("%p %s\n", m2, hexncpy(m2->m_data, m2->m_len, hexstr, 2499 HEX_NCPYLEN(m2->m_len), "-")); 2500 len -= m2->m_len; 2501 m2 = m2->m_next; 2502 } 2503 kfree(hexstr, M_TEMP); 2504 return; 2505 } 2506 2507 /* 2508 * "Move" mbuf pkthdr from "from" to "to". 2509 * "from" must have M_PKTHDR set, and "to" must be empty. 2510 */ 2511 void 2512 m_move_pkthdr(struct mbuf *to, struct mbuf *from) 2513 { 2514 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header")); 2515 2516 to->m_flags |= from->m_flags & M_COPYFLAGS; 2517 to->m_pkthdr = from->m_pkthdr; /* especially tags */ 2518 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */ 2519 } 2520 2521 /* 2522 * Duplicate "from"'s mbuf pkthdr in "to". 2523 * "from" must have M_PKTHDR set, and "to" must be empty. 2524 * In particular, this does a deep copy of the packet tags. 2525 */ 2526 int 2527 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how) 2528 { 2529 KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header")); 2530 2531 to->m_flags = (from->m_flags & M_COPYFLAGS) | 2532 (to->m_flags & ~M_COPYFLAGS); 2533 to->m_pkthdr = from->m_pkthdr; 2534 SLIST_INIT(&to->m_pkthdr.tags); 2535 return (m_tag_copy_chain(to, from, how)); 2536 } 2537 2538 /* 2539 * Defragment a mbuf chain, returning the shortest possible 2540 * chain of mbufs and clusters. If allocation fails and 2541 * this cannot be completed, NULL will be returned, but 2542 * the passed in chain will be unchanged. Upon success, 2543 * the original chain will be freed, and the new chain 2544 * will be returned. 2545 * 2546 * If a non-packet header is passed in, the original 2547 * mbuf (chain?) will be returned unharmed. 2548 * 2549 * m_defrag_nofree doesn't free the passed in mbuf. 
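 *
 * Illustrative use only, e.g. in a transmit path that produced more
 * segments than a device can handle:
 *
 *	n = m_defrag(m, M_NOWAIT);
 *	if (n == NULL)
 *		m_freem(m);
 *	else
 *		m = n;
 *
 * On failure the original chain is still intact (freeing it above is the
 * caller's choice); on success the original chain has already been
 * released unless it was returned unchanged.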
2550 */ 2551 struct mbuf * 2552 m_defrag(struct mbuf *m0, int how) 2553 { 2554 struct mbuf *m_new; 2555 2556 if ((m_new = m_defrag_nofree(m0, how)) == NULL) 2557 return (NULL); 2558 if (m_new != m0) 2559 m_freem(m0); 2560 return (m_new); 2561 } 2562 2563 struct mbuf * 2564 m_defrag_nofree(struct mbuf *m0, int how) 2565 { 2566 struct mbuf *m_new = NULL, *m_final = NULL; 2567 int progress = 0, length, nsize; 2568 2569 if (!(m0->m_flags & M_PKTHDR)) 2570 return (m0); 2571 2572 #ifdef MBUF_STRESS_TEST 2573 if (m_defragrandomfailures) { 2574 int temp = karc4random() & 0xff; 2575 if (temp == 0xba) 2576 goto nospace; 2577 } 2578 #endif 2579 2580 m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize); 2581 if (m_final == NULL) 2582 goto nospace; 2583 m_final->m_len = 0; /* in case m0->m_pkthdr.len is zero */ 2584 2585 if (m_dup_pkthdr(m_final, m0, how) == 0) 2586 goto nospace; 2587 2588 m_new = m_final; 2589 2590 while (progress < m0->m_pkthdr.len) { 2591 length = m0->m_pkthdr.len - progress; 2592 if (length > MCLBYTES) 2593 length = MCLBYTES; 2594 2595 if (m_new == NULL) { 2596 m_new = m_getl(length, how, MT_DATA, 0, &nsize); 2597 if (m_new == NULL) 2598 goto nospace; 2599 } 2600 2601 m_copydata(m0, progress, length, mtod(m_new, caddr_t)); 2602 progress += length; 2603 m_new->m_len = length; 2604 if (m_new != m_final) 2605 m_cat(m_final, m_new); 2606 m_new = NULL; 2607 } 2608 if (m0->m_next == NULL) 2609 m_defraguseless++; 2610 m_defragpackets++; 2611 m_defragbytes += m_final->m_pkthdr.len; 2612 return (m_final); 2613 nospace: 2614 m_defragfailure++; 2615 if (m_new) 2616 m_free(m_new); 2617 m_freem(m_final); 2618 return (NULL); 2619 } 2620 2621 /* 2622 * Move data from uio into mbufs. 2623 */ 2624 struct mbuf * 2625 m_uiomove(struct uio *uio) 2626 { 2627 struct mbuf *m; /* current working mbuf */ 2628 struct mbuf *head = NULL; /* result mbuf chain */ 2629 struct mbuf **mp = &head; 2630 int flags = M_PKTHDR; 2631 int nsize; 2632 int error; 2633 int resid; 2634 2635 do { 2636 if (uio->uio_resid > INT_MAX) 2637 resid = INT_MAX; 2638 else 2639 resid = (int)uio->uio_resid; 2640 m = m_getl(resid, M_WAITOK, MT_DATA, flags, &nsize); 2641 if (flags) { 2642 m->m_pkthdr.len = 0; 2643 /* Leave room for protocol headers. */ 2644 if (resid < MHLEN) 2645 MH_ALIGN(m, resid); 2646 flags = 0; 2647 } 2648 m->m_len = imin(nsize, resid); 2649 error = uiomove(mtod(m, caddr_t), m->m_len, uio); 2650 if (error) { 2651 m_free(m); 2652 goto failed; 2653 } 2654 *mp = m; 2655 mp = &m->m_next; 2656 head->m_pkthdr.len += m->m_len; 2657 } while (uio->uio_resid > 0); 2658 2659 return (head); 2660 2661 failed: 2662 m_freem(head); 2663 return (NULL); 2664 } 2665 2666 struct mbuf * 2667 m_last(struct mbuf *m) 2668 { 2669 while (m->m_next) 2670 m = m->m_next; 2671 return (m); 2672 } 2673 2674 /* 2675 * Return the number of bytes in an mbuf chain. 2676 * If lastm is not NULL, also return the last mbuf. 2677 */ 2678 u_int 2679 m_lengthm(struct mbuf *m, struct mbuf **lastm) 2680 { 2681 u_int len = 0; 2682 struct mbuf *prev = m; 2683 2684 while (m) { 2685 len += m->m_len; 2686 prev = m; 2687 m = m->m_next; 2688 } 2689 if (lastm != NULL) 2690 *lastm = prev; 2691 return (len); 2692 } 2693 2694 /* 2695 * Like m_lengthm(), except also keep track of mbuf usage. 
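 *
 * Illustrative use only, e.g. when charging a chain against both a byte
 * limit and a storage limit:
 *
 *	u_int bytes, mbcnt;
 *	struct mbuf *last;
 *
 *	bytes = m_countm(m, &last, &mbcnt);
 *
 * Here "bytes" is the total payload length, "last" points at the final
 * mbuf, and "mbcnt" is the storage consumed (MSIZE per mbuf plus the
 * size of any external buffer).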
2696 */ 2697 u_int 2698 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt) 2699 { 2700 u_int len = 0, mbcnt = 0; 2701 struct mbuf *prev = m; 2702 2703 while (m) { 2704 len += m->m_len; 2705 mbcnt += MSIZE; 2706 if (m->m_flags & M_EXT) 2707 mbcnt += m->m_ext.ext_size; 2708 prev = m; 2709 m = m->m_next; 2710 } 2711 if (lastm != NULL) 2712 *lastm = prev; 2713 *pmbcnt = mbcnt; 2714 return (len); 2715 } 2716
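/*
 * Illustrative sketch only, kept under "#if 0" so that it is never
 * compiled: it shows how the helpers above might be combined to build a
 * small, zero-padded packet.  The function name, its arguments and the
 * 60-byte minimum length are assumptions made for the example and are
 * not part of this file.
 */
#if 0
static struct mbuf *
example_build_padded_packet(const char *payload, int len)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	/* Append the payload, extending the chain if it does not fit. */
	if (m_append(m, len, payload) == 0) {
		m_freem(m);
		return (NULL);
	}

	/* Zero-pad the packet out to an assumed 60 byte minimum. */
	if (m_devpad(m, 60) != 0) {
		m_freem(m);
		return (NULL);
	}
	return (m);
}
#endif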