/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.52 2005/06/17 18:58:02 dillon Exp $
 */

#include "opt_param.h"
#include "opt_ddb.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[MT_NTYPES];

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;

int	nmbclusters;
int	nmbufs;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
	"Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

	return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */

static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}
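
/*
 * Illustrative sketch of the constructor contract above (hypothetical
 * function, for exposition only): a caller that obtains a raw mbuf must
 * set m_len itself before using the data area.
 */
#ifdef notdef
static struct mbuf *
example_fill(const void *src, int len)
{
	struct mbuf *m;

	if (len > MLEN)
		return (NULL);
	m = m_get(MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_len = len;		/* the ctor did not initialize this */
	bcopy(src, mtod(m, void *), len);
	return (m);
}
#endif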

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = malloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	free(mcl->mcl_data, M_MBUFCL);
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	++cl->mcl_refs;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}
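
/*
 * Illustrative sketch of the invariants that hold immediately after
 * linkcluster() (hypothetical assertion helper, for exposition only).
 */
#ifdef notdef
static void
example_assert_linked(struct mbuf *m)
{
	struct mbcluster *cl = m->m_ext.ext_arg;

	KKASSERT((m->m_flags & (M_EXT | M_EXT_CLUSTER)) ==
	    (M_EXT | M_EXT_CLUSTER));
	KKASSERT(m->m_data == m->m_ext.ext_buf);
	KKASSERT(m->m_ext.ext_buf == cl->mcl_data);
	KKASSERT(m->m_ext.ext_size == MCLBYTES);
	KKASSERT(cl->mcl_refs >= 1);
}
#endif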

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	mbuf_cache = objcache_create("mbuf", nmbufs, 0,
	    mbuf_ctor, null_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
	    mbufphdr_ctor, null_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mclmeta_cache = objcache_create("cluster mbuf", nmbclusters, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	return;
}
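
/*
 * Illustrative sketch: a hypothetical additional cache would follow the
 * same pattern as mbinit() above, pairing a constructor with the
 * malloc-backed objcache allocator (the names here are made up).
 */
#ifdef notdef
static struct objcache *example_cache;

static void
example_cache_init(void)
{
	example_cache = objcache_create("example mbuf", nmbufs, 0,
	    mbuf_ctor, null_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
}
#endif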

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	crit_enter();
	++mbtypes[type];
	--mbtypes[m->m_type];
	m->m_type = type;
	crit_exit();
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	crit_enter();
	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	crit_exit();
	mbstat.m_drain++;
}

static void __inline
updatestats(struct mbuf *m, int type)
{
	m->m_type = type;

	crit_enter();
	++mbtypes[type];
	++mbstat.m_mbufs;
	crit_exit();
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}
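
/*
 * Illustrative sketch: allocation from an interrupt path uses MB_DONTWAIT
 * and must handle a NULL return, since the reclaim-and-retry pass above
 * is only taken for MB_TRYWAIT (hypothetical function, for exposition
 * only).
 */
#ifdef notdef
static struct mbuf *
example_alloc_hdr(void)
{
	struct mbuf *m;

	m = m_gethdr(MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);		/* caches empty, cannot block */
	m->m_len = 0;
	m->m_pkthdr.len = 0;
	return (m);
}
#endif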

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mbuf *m;
	int ocflags = MBTOM(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbufphdrcluster_cache, ocflags);
	else
		m = objcache_get(mbufcluster_cache, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbufcluster_cache;
			else
				reclaimlist[0] = mbufphdrcluster_cache;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	m->m_type = type;

	crit_enter();
	++mbtypes[type];
	++mbstat.m_clusters;
	crit_exit();
	return (m);
}

/*
 * Allocate chain of requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int how, int type)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}
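
/*
 * Illustrative sketch: m_getc() hands back a chain whose per-mbuf m_len
 * fields are zero; the caller fills the buffers and sets the lengths
 * (hypothetical function, for exposition only).
 */
#ifdef notdef
static struct mbuf *
example_chain(const char *src, int len, int how)
{
	struct mbuf *top, *m;
	int chunk;

	top = m_getc(len, how, MT_DATA);
	if (top == NULL)
		return (NULL);
	for (m = top; len > 0; m = m->m_next) {
		chunk = min(len, M_TRAILINGSPACE(m));
		bcopy(src, mtod(m, void *), chunk);
		m->m_len = chunk;
		src += chunk;
		len -= chunk;
	}
	return (top);
}
#endif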

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		crit_enter();
		++mbstat.m_clusters;
		/* leave the m_mbufs count intact for original mbuf */
		crit_exit();
	}
}

static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	/* XXX interrupt race.  Currently called from a critical section */
	if (mcl->mcl_refs > 1) {
		atomic_subtract_int(&mcl->mcl_refs, 1);
	} else {
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		objcache_put(mclmeta_cache, mcl);
	}
}

extern void db_print_backtrace(void);

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	--mbtypes[m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
#ifdef DDB
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			printf("mfree: m->m_nextpkt != NULL\n");
			db_print_backtrace();
		}
#endif
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	crit_enter();
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_PHCACHE)
				objcache_put(mbufphdrcluster_cache, m);
			else
				objcache_put(mbufcluster_cache, m);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_flags & M_PHCACHE)
				objcache_dtor(mbufphdrcluster_cache, m);
			else
				objcache_dtor(mbufcluster_cache, m);
		}
		--mbstat.m_clusters;
		break;
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		--mbstat.m_clusters;
		/* fall through */
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		--mbstat.m_mbufs;
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
		break;
	}
	crit_exit();
	return (n);
}

void
m_freem(struct mbuf *m)
{
	crit_enter();
	while (m)
		m = m_free(m);
	crit_exit();
}

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
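
/*
 * Illustrative sketch: the usual consumer of m_prepend() is the
 * M_PREPEND() macro, which only takes this path when the chain has no
 * leading space ("struct exhdr" and the function below are made up).
 */
#ifdef notdef
struct exhdr { u_int32_t ex_field; };

static struct mbuf *
example_encap(struct mbuf *m)
{
	struct exhdr *hdr;

	M_PREPEND(m, sizeof(struct exhdr), MB_DONTWAIT);
	if (m == NULL)
		return (NULL);		/* the chain was freed on failure */
	hdr = mtod(m, struct exhdr *);
	hdr->ex_field = 0;
	return (m);
}
#endif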

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;
	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;
	return (NULL);
}
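
/*
 * Illustrative sketch: a copy made by m_copym() shares clusters with the
 * original, so a caller that needs to modify the data falls back to a
 * deep copy (hypothetical function; assumes "m" has M_PKTHDR set).
 */
#ifdef notdef
static struct mbuf *
example_writable_copy(struct mbuf *m, int wait)
{
	struct mbuf *n;

	n = m_copym(m, 0, M_COPYALL, wait);
	if (n != NULL && m_sharecount(n) > 1) {
		/* shared with 'm'; take a private copy instead */
		m_freem(n);
		n = m_dup(m, wait);
	}
	return (n);
}
#endif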

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
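
/*
 * Illustrative sketch: m_copydata() is the safe way to read a header that
 * may be split across several mbufs (hypothetical function, for
 * exposition only).
 */
#ifdef notdef
static int
example_peek(const struct mbuf *m, void *buf, int len)
{
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < len)
		return (EINVAL);
	m_copydata(m, 0, len, (caddr_t)buf);
	return (0);
}
#endif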

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
		    &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	mbstat.m_mcfail++;
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
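
/*
 * Illustrative sketch: positive lengths make m_adj() trim from the head,
 * negative lengths from the tail, e.g. stripping a link-level header and
 * a trailing checksum (the sizes here are made up).
 */
#ifdef notdef
static void
example_strip(struct mbuf *m)
{
	m_adj(m, 14);		/* drop a 14-byte header from the front */
	m_adj(m, -4);		/* drop a 4-byte trailer from the end */
}
#endif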

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;
	return (NULL);
}
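
/*
 * Illustrative sketch: the canonical m_pullup() pattern before casting
 * with mtod(), reusing the made-up "struct exhdr" from the earlier
 * sketch.  On failure the chain has already been freed.
 */
#ifdef notdef
static struct mbuf *
example_input(struct mbuf *m)
{
	struct exhdr *hdr;

	if (m->m_len < (int)sizeof(struct exhdr)) {
		m = m_pullup(m, sizeof(struct exhdr));
		if (m == NULL)
			return (NULL);
	}
	hdr = mtod(m, struct exhdr *);
	/* ... inspect *hdr ... */
	return (m);
}
#endif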

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
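
/*
 * Illustrative sketch: m_copyback() overwrites "len" bytes at a given
 * offset, growing the chain if necessary, e.g. patching a 16-bit field
 * at a known offset (the offset and field are hypothetical).
 */
#ifdef notdef
static void
example_patch(struct mbuf *m, int off, u_int16_t val)
{
	m_copyback(m, off, sizeof(val), (caddr_t)&val);
}
#endif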

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == NULL)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
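
/*
 * Illustrative sketch: the typical m_defrag() caller is a transmit path
 * whose DMA engine only supports a few segments (the limit and function
 * below are made up).  Note that on failure the original chain is left
 * intact, so the caller decides whether to free it.
 */
#ifdef notdef
#define EXAMPLE_MAXSEGS	4

static struct mbuf *
example_prep_tx(struct mbuf *m)
{
	struct mbuf *n;
	int nsegs = 0;

	for (n = m; n != NULL; n = n->m_next)
		++nsegs;
	if (nsegs > EXAMPLE_MAXSEGS) {
		n = m_defrag(m, MB_DONTWAIT);
		if (n == NULL) {
			m_freem(m);	/* m_defrag did not free it */
			return (NULL);
		}
		m = n;
	}
	return (m);
}
#endif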

/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;

	do {
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = min(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
		resid -= m->m_len;
	} while (resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}
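
/*
 * Illustrative sketch: m_lengthm() yields the total byte count and,
 * optionally, the tail mbuf, which makes appends cheap (hypothetical
 * function; assumes "m" has M_PKTHDR set).
 */
#ifdef notdef
static void
example_append(struct mbuf *m, struct mbuf *extra)
{
	struct mbuf *last;

	m_lengthm(m, &last);
	last->m_next = extra;
	m->m_pkthdr.len += m_lengthm(extra, NULL);
}
#endif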