/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.14 2003/12/28 06:11:32 dillon Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbuf *mbute;
char *mclrefcnt;
struct mbstat mbstat;
u_long mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif

int nmbclusters;
int nmbufs;
u_int m_mballoc_wid = 0;
u_int m_clalloc_wid = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

static void m_reclaim (void);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 4)
#endif
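/*
 * Illustrative note (not part of the original file): the two read-only
 * limits above are normally sized at boot from loader tunables, which
 * tunable_mbinit() below fetches before the VM system is initialized.
 * A sketch of how an administrator might raise them, assuming the usual
 * /boot/loader.conf mechanism (the values here are hypothetical):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="262144"
 */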
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

	return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	int nmb;
	int how;
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map
	 * (or trying to) in order to avoid dipping into the section of
	 * mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map).  However,
	 * you are not dead, as m_reclaim might still be able to free a
	 * substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs (when
	 * we're calling with M_WAIT the sleep procedure will be woken
	 * up when an mbuf is freed.  See m_mballoc_wait()).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
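/*
 * Illustrative sketch (not part of the original file): callers are
 * expected to hold splimp() across the free-list manipulation, as
 * mbinit() above does.  A hypothetical preload of 64 extra mbufs would
 * follow the same pattern:
 *
 *	int s = splimp();
 *	if (m_mballoc(64, M_DONTWAIT) == 0)
 *		printf("mbuf preload failed\n");
 *	splx(s);
 */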
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	s = splimp();
	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;
	splx(s);

	/*
	 * Now that we (think) we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 *
	 * XXX: We retry to fetch _even_ if the sleep timed out.  This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, M_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, M_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	while (1) {
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
    &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	int ncl;
	int how;
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map (or trying to) in order to avoid dipping into the section
	 * of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters)
		goto m_clalloc_fail;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).  From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full)
		goto m_clalloc_fail;

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
		    ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		static int last_report;	/* when we did that (in ticks) */
m_clalloc_fail:
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbuf clusters exhausted, please see tuning(7).\n");
		}
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
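/*
 * Illustrative sketch (not part of the original file): the tsleep in
 * m_mballoc_wait() above is paired with a wakeup from the free path;
 * MMBWAKEUP() (assumed to live in the mbuf headers, like its cluster
 * counterpart MCLWAKEUP()) wakes one sleeper when the wait counter is
 * non-zero.  Schematically, under that assumption:
 *
 *	waiter:	m_mballoc_wid++;
 *		tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait);
 *	waker:	if (m_mballoc_wid != 0)
 *			wakeup_one(&m_mballoc_wid);
 */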
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0,
	    ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, we will retry the
	 * allocation, but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = m_mclalloc(M_DONTWAIT);

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}

	splx(s);
	return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

static void
m_reclaim()
{
	struct domain *dp;
	struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}
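/*
 * Illustrative sketch (not part of the original file): m_reclaim()
 * walks every domain's protosw array and invokes pr_drain.  A protocol
 * that wants to participate supplies a drain hook in its protosw
 * entry; a hypothetical protocol might provide
 *
 *	static void
 *	myproto_drain(void)
 *	{
 *		(drop cached reassembly fragments and other
 *		 discardable mbufs held by the protocol here)
 *	}
 *
 * and point pr_drain at myproto_drain in its struct protosw.
 */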
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retry(how, type);
		if (m == NULL && how == M_WAIT)
			m = m_mballoc_wait(MGET_C, type);
	}
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retryhdr(how, type);
		if (m == NULL && how == M_WAIT)
			m = m_mballoc_wait(MGETHDR_C, type);
	}
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer heavily, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 0;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
    "Maximum number of mbufs+cluster in free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
    "Current number of mbufs+cluster in free list");

struct mbuf *
m_getcl(int how, short type, int flags)
{
	int s = splimp();
	struct mbuf *mp;

	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mp = mcl_pool;
			mcl_pool = mp->m_nextpkt;
			mcl_pool_now--;
			splx(s);
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
			return mp;
		} else
			MGETHDR(mp, how, type);
	} else
		MGET(mp, how, type);
	if (mp) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			mp = NULL;
		}
	}
	splx(s);
	return mp;
}
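/*
 * Illustrative sketch (not part of the original file): a typical
 * consumer of m_getcl() is a driver receive-ring refill, which wants a
 * packet-header mbuf with a cluster already attached in one call.  A
 * hypothetical refill might look like:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	(hand mtod(m, caddr_t) to the DMA engine)
 */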
/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}

/*
 * m_mclalloc() - Allocates an mbuf cluster.
 */
caddr_t
m_mclalloc(int how)
{
	caddr_t mp;
	int s;

	s = splimp();

	if (mclfree == NULL)
		m_clalloc(1, how);
	mp = (caddr_t)mclfree;
	if (mp != NULL) {
		KKASSERT((struct mbuf *)mp >= mbutl &&
		    (struct mbuf *)mp < mbute);
		mclrefcnt[mtocl(mp)]++;
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)mp)->mcl_next;
		splx(s);
		return (mp);
	}
	splx(s);
	if (how == M_WAIT)
		return (m_clalloc_wait());
	return (NULL);
}

/*
 * m_mclget() - Adds a cluster to a normal mbuf.  M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
	m->m_ext.ext_buf = m_mclalloc(how);
	if (m->m_ext.ext_buf != NULL) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_ref = NULL;
		m->m_ext.ext_size = MCLBYTES;
	}
}

static __inline void
_m_mclfree(caddr_t data)
{
	union mcluster *mp = (union mcluster *)data;

	KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
	KKASSERT((struct mbuf *)mp >= mbutl &&
	    (struct mbuf *)mp < mbute);
	if (--mclrefcnt[mtocl(mp)] == 0) {
		mp->mcl_next = mclfree;
		mclfree = mp;
		mbstat.m_clfree++;
		MCLWAKEUP();
	}
}

void
m_mclfree(caddr_t mp)
{
	int s = splimp();

	_m_mclfree(mp);
	splx(s);
}
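/*
 * Illustrative sketch (not part of the original file): clusters are
 * reference counted through the mclrefcnt[] array, so "freeing" one
 * (as _m_mclfree() above does) only returns it to mclfree once the
 * last reference drops.  Sharing a cluster between two mbufs, as
 * m_copym() does later in this file, is therefore just:
 *
 *	atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
 *	n->m_ext = m->m_ext;
 *	n->m_flags |= M_EXT;
 */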
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	int s;
	struct mbuf *n;

	s = splimp();
	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
	mbtypes[m->m_type]--;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT) {
		if (m->m_ext.ext_free != NULL) {
			m->m_ext.ext_free(m->m_ext.ext_buf, m->m_ext.ext_size);
		} else {
			_m_mclfree(m->m_ext.ext_buf);	/* inlined */
		}
	}
	n = m->m_next;
	m->m_type = MT_FREE;
	mbtypes[MT_FREE]++;
	m->m_next = mmbfree;
	mmbfree = m;
	MMBWAKEUP();
	splx(s);

	return (n);
}

void
m_freem(struct mbuf *m)
{
	int s = splimp();

	/*
	 * Try to keep a small pool of mbuf+cluster for quick use in
	 * device drivers.  A good candidate is an M_PKTHDR buffer with
	 * only one cluster attached.  Other mbufs, or those exceeding
	 * the pool size, are just m_free'd in the usual way.
	 * The following code makes sure that m_next, m_type,
	 * m_pkthdr.aux and m_ext.* are properly initialized.
	 * Other fields in the mbuf are initialized in m_getcl()
	 * upon allocation.
	 */
	if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
	    (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
	    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
		m_tag_delete_chain(m, NULL);
		m->m_nextpkt = mcl_pool;
		mcl_pool = m;
		mcl_pool_now++;
	} else {
		while (m)
			m = m_free(m);
	}
	splx(s);
}

/*
 * Mbuf utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
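/*
 * Illustrative sketch (not part of the original file): protocols
 * normally reach m_prepend() through the M_PREPEND() macro, which
 * first tries to use leading space already present in the mbuf.  A
 * hypothetical encapsulation adding a header "struct foohdr" would do:
 *
 *	M_PREPEND(m, sizeof(struct foohdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	fh = mtod(m, struct foohdr *);
 *	(fill in *fh)
 */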
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	const struct mbuf *m;
	int off0, wait;
	int len;
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
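/*
 * Illustrative sketch (not part of the original file): TCP-style
 * retransmission is the classic m_copym() consumer, taking a cheap
 * reference-counted copy of queued data rather than duplicating the
 * clusters.  A hypothetical caller:
 *
 *	n = m_copym(m, off, len, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	(n shares m's clusters; treat it as read-only unless
 *	 M_WRITABLE() says otherwise)
 */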
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (m->m_ext.ext_ref == NULL)
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	const struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
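/*
 * Illustrative sketch (not part of the original file): m_copydata() is
 * the safe way to read a header that may be split across mbufs without
 * modifying the chain, e.g. peeking at a (hypothetical) 8-byte header
 * at offset "off":
 *
 *	char hdr[8];
 *
 *	if (m->m_pkthdr.len < off + sizeof(hdr))
 *		return (EINVAL);
 *	m_copydata(m, off, sizeof(hdr), hdr);
 */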
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
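/*
 * Illustrative sketch (not part of the original file): choosing among
 * the copy routines above comes down to writability.  A hypothetical
 * caller that must scribble on the payload would use the deep copy,
 *
 *	n = m_dup(m, M_DONTWAIT);		(always writable)
 *
 * whereas a transmit-only path can use the cheaper, cluster-sharing
 *
 *	n = m_copypacket(m, M_DONTWAIT);	(read-only copy)
 */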
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
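/*
 * Illustrative sketch (not part of the original file): the canonical
 * m_pullup() caller is protocol input, which needs its header
 * contiguous before casting with mtod().  For example, for an IP
 * header (assuming <netinet/ip.h>):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;		(m_pullup already freed the chain)
 *	ip = mtod(m, struct ip *);
 */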
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void)m_free(n);
				return (0);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (m->m_ext.ext_ref == NULL)
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) (char *from, caddr_t to, u_int len);
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
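/*
 * Illustrative sketch (not part of the original file): older drivers
 * whose hardware exposes received frames in device-local memory use
 * m_devget() to turn a frame into an mbuf chain in one call.  A
 * hypothetical RX handler (sc->rx_buf, pktlen, and the softc layout
 * are all made up for the example):
 *
 *	m = m_devget(sc->rx_buf, pktlen, 0, &sc->arpcom.ac_if, NULL);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 *	(pass m to ether_input() or the appropriate input routine)
 */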
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
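/*
 * Illustrative sketch (not part of the original file): the difference
 * between the two pkthdr helpers above matters when the source mbuf
 * lives on.  A hypothetical re-headering sequence:
 *
 *	MGETHDR(n, M_DONTWAIT, MT_DATA);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	M_MOVE_PKTHDR(n, m);	(m keeps its data but loses M_PKTHDR,
 *				 and its tags move to n)
 *
 * whereas m_dup_pkthdr() leaves "from" untouched and deep-copies the
 * tag chain, an allocation that can fail and must be checked.
 */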
/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet-header mbuf is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;

		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_freem(m0);
	m0 = m_final;
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
	return (m0);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
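/*
 * Illustrative sketch (not part of the original file): drivers whose
 * DMA engines accept only a limited number of segments call m_defrag()
 * before giving up on a long transmit chain.  A hypothetical transmit
 * path (too_many_segments() is made up for the example):
 *
 *	if (too_many_segments(m)) {
 *		struct mbuf *m2 = m_defrag(m, M_DONTWAIT);
 *		if (m2 == NULL) {
 *			m_freem(m);	(original chain is unchanged on
 *					 failure; drop or requeue it)
 *			return (ENOBUFS);
 *		}
 *		m = m2;
 *	}
 */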