/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	crit_enter();
	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt))
		panic("mbuftrack: mbuf %p already being tracked\n", m);
	crit_exit();
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	crit_enter();
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		kprintf("mbufuntrack: mbuf %p was not tracked\n", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		kfree(mbt, M_MTRACK);
	}
	crit_exit();
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	crit_enter();
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt)
				mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	crit_exit();
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	error = SYSCTL_OUT(req, buf, strlen(buf));
	if (error)
		return(-error);
	return(0);
}
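
/*
 * Illustrative sketch (not part of the build): with MBUF_DEBUG compiled
 * in, a subsystem can stamp every mbuf of a packet chain with an id of
 * its choosing so leaked mbufs can be attributed when the tracking tree
 * is dumped via the kern.ipc.showmbufs sysctl below.  The id value here
 * is arbitrary:
 *
 *	mbuftrackid(m, 42);	(stamp chain at a point of interest)
 */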

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	crit_enter();
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	crit_exit();
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;

int	nmbclusters;
int	nmbufs;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, do_mbstat, "S,mbstat", "");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative, so just
	 * grab their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp,
				    sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];
	}

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}
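
/*
 * Usage sketch (illustrative, not compiled here): the aggregated
 * statistics exported above can be read from userland with
 * sysctlbyname(3), given <sys/types.h>, <sys/sysctl.h> and <sys/mbuf.h>:
 *
 *	struct mbstat mbs;
 *	size_t len = sizeof(mbs);
 *
 *	if (sysctlbyname("kern.ipc.mbstat", &mbs, &len, NULL, 0) == 0)
 *		printf("mbufs in use: %lu\n", (u_long)mbs.m_mbufs);
 */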

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */

static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}
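
/*
 * Illustrative sketch (not part of the build): per the contract above,
 * code pulling a raw mbuf straight out of an object cache must set the
 * length and type fields itself, which is a simplified view of what
 * m_get() does internally:
 *
 *	struct mbuf *m;
 *
 *	m = objcache_get(mbuf_cache, M_WAITOK);
 *	if (m != NULL) {
 *		m->m_len = 0;		(ctor leaves this uninitialized)
 *		m->m_type = MT_DATA;
 *	}
 */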

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a
 * reference count.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust the backing kmalloc pools' limits later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf", &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust the backing kmalloc pools' limits.
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}
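
/*
 * Illustrative sketch (not part of the build): a caller that wants to
 * scribble on cluster data in place should first make sure it holds the
 * only reference; otherwise it must work on a copy:
 *
 *	if (m_sharecount(m) > 1) {
 *		(treat the data as read-only, or take a deep copy
 *		 via m_dup() before modifying it)
 *	}
 */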

/*
 * Change mbuf to new type.
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	crit_enter();
	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	crit_exit();
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
}

static void __inline
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mbuf *m;
	int ocflags = MBTOM(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbufphdrcluster_cache, ocflags);
	else
		m = objcache_get(mbufcluster_cache, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbufcluster_cache;
			else
				reclaimlist[0] = mbufphdrcluster_cache;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	m->m_type = type;

	mbuftrack(m);

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
	return (m);
}

/*
 * Allocate a chain of the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}
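
/*
 * Usage sketch (illustrative, not compiled here): m_getc() hands back a
 * chain whose mbufs all have m_len == 0, so the caller fills each one
 * and sets the lengths itself; totlen is a caller-supplied byte count:
 *
 *	m = m_getc(totlen, MB_WAIT, MT_DATA);
 *	if (m != NULL) {
 *		(for each mbuf n in the chain: copy up to its cluster or
 *		 data-area size into mtod(n, caddr_t) and set n->m_len)
 *	}
 */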

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
					  1);
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.  If there is only one reference
 * count we are the only entity referencing the cluster and no further
 * locking is required.  Otherwise we must protect against a race to 0
 * with the serializer.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
}

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace();
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	crit_enter();
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_PHCACHE)
				objcache_put(mbufphdrcluster_cache, m);
			else
				objcache_put(mbufcluster_cache, m);
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_flags & M_PHCACHE)
				objcache_dtor(mbufphdrcluster_cache, m);
			else
				objcache_dtor(mbufcluster_cache, m);
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/* fall through */
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * Return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		atomic_subtract_long_nonlocked(
		    &mbstat[mycpu->gd_cpuid].m_mbufs, 1);
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
		break;
	}
	crit_exit();
	return (n);
}
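
/*
 * Illustrative sketch (not part of the build): because m_free() returns
 * the successor, a caller can strip the head mbuf of a chain and keep
 * processing the remainder:
 *
 *	m = m_free(m);		(drop leading mbuf, continue with tail)
 *
 * Freeing an entire chain is just this call in a loop, which is exactly
 * what m_freem() below does.
 */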

void
m_freem(struct mbuf *m)
{
	crit_enter();
	while (m)
		m = m_free(m);
	crit_exit();
}

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate a new mbuf to prepend to the
 * chain and copy the junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}
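
/*
 * Usage sketch (illustrative, not compiled here): a typical m_copym()
 * use is retransmission-style output, where a reference-shared copy of
 * queued data is handed down the output path.  The copy must be treated
 * as read-only since it shares the original's clusters:
 *
 *	n = m_copym(m, off, len, MB_DONTWAIT);
 *	if (n == NULL)
 *		(handle allocation failure)
 */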

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
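
/*
 * Illustrative sketch (not part of the build): m_copydata() is the usual
 * way to linearize a protocol header that may straddle mbufs into a
 * stack buffer before parsing it:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */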

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
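
/*
 * Illustrative sketch (not part of the build): m_adj() trims from the
 * head with a positive count and from the tail with a negative one.
 * Two common driver/stack idioms:
 *
 *	m_adj(m, sizeof(struct ether_header));	(strip link header)
 *	m_adj(m, -ETHER_CRC_LEN);		(drop trailing FCS)
 */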

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}
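
/*
 * Illustrative sketch (not part of the build): the canonical m_pullup()
 * idiom in protocol input paths.  The old chain must not be touched on
 * failure because m_pullup() has already freed it:
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;		(chain was freed for us)
 *	}
 *	ip = mtod(m, struct ip *);
 */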

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
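
/*
 * Usage sketch (illustrative, not compiled here): splitting a record off
 * the front of a stream; reclen is a caller-supplied record length:
 *
 *	tail = m_split(m, reclen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		(handle failure; m was left in its original state)
 *
 * Afterwards m holds the first reclen bytes and tail the remainder; the
 * pieces may share a cluster, so check M_WRITABLE() before modifying
 * either one in place.
 */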

/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
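
/*
 * Illustrative sketch (not part of the build): m_copyback() is handy for
 * patching a field at a known offset in an already-built packet, e.g.
 * storing a freshly computed checksum; sum_off is a caller-supplied
 * offset:
 *
 *	u_short sum;
 *
 *	sum = (compute checksum);
 *	m_copyback(m, sum_off, sizeof(sum), (caddr_t)&sum);
 */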

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
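
/*
 * Illustrative sketch (not part of the build): network drivers commonly
 * call m_defrag() when a transmit chain has more fragments than their
 * DMA engine's scatter/gather limit allows:
 *
 *	if (chain has too many fragments for the hardware) {
 *		m_head = m_defrag(m_head, MB_DONTWAIT);
 *		if (m_head == NULL)
 *			(drop the packet)
 *	}
 */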

/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;

	do {
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = min(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
		resid -= m->m_len;
	} while (resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}