/*
 * (MPSAFE)
 *
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int	trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin =
	SPINLOCK_INITIALIZER(mbuf_track_spin);

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked\n", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked\n", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

int	nmbclusters;
int	nmbufs;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");

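/*
 * Userland can fetch the aggregated, cross-CPU statistics exported by
 * do_mbstat() below with sysctlbyname(3).  A minimal userland sketch
 * (not part of this file):
 *
 *	struct mbstat mbs;
 *	size_t len = sizeof(mbs);
 *
 *	if (sysctlbyname("kern.ipc.mbstat", &mbs, &len, NULL, 0) == 0)
 *		printf("mbufs in use: %lu\n", (u_long)mbs.m_mbufs);
 */
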
static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative, so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp,
				    sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];
	}

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0,
	   "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0,
	   "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MJBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
static MALLOC_DEFINE(M_MJCLMETA, "mjclmeta", "mjclmeta");

static void m_reclaim (void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

/*
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2 + maxfiles)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

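/*
 * Since the limits above may only be set as boot-time tunables, the
 * usual way to raise them is /boot/loader.conf.  A sketch (the values
 * are arbitrary and workload-dependent):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() will still enforce nmbufs >= nmbclusters * 2.
 */
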
/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a
 * reference count.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing; deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if ((m->m_flags & M_EXT) && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_mjumpagesize,
					  MJUMPAGESIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf", &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 128,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	cl_limit = nmbclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf", &cl_limit, 0,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, 128, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufjcluster_cache = objcache_create("mbuf + jcluster", &limit, 0,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    &limit, 64, mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
	    MCLBYTES * cl_limit * 3/4 + MJUMPAGESIZE * cl_limit / 4);
	/*kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);*/

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * Change the mbuf to a new type.
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
}

static void __inline
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct mbuf *m = NULL;
	struct objcache *mbclc, *mbphclc;
	int ocflags = MBTOM(how);
	int ntries = 0;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		break;
	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		break;
	}

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}

#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
	return (m);
}

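/*
 * Typical m_getjcl() usage (sketch): a receive path allocating a
 * cluster-backed packet-header mbuf.  `framelen' is a hypothetical
 * driver variable:
 *
 *	struct mbuf *m;
 *
 *	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = framelen;
 */
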
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return (m_getjcl(how, type, flags, MCLBYTES));
}

/*
 * Allocate a chain of the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}

/*
 * Adds a cluster to a normal mbuf; M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
					  1);
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
}

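/*
 * Illustration of the N->0 race: if two holders call m_mclfree() at the
 * same time on a cluster with mcl_refs == 2, atomic_fetchadd_int()
 * guarantees exactly one caller observes the old value 1 and returns
 * the cluster to the objcache; the other observes 2 and merely drops
 * its reference.  A plain read-modify-write could let both, or neither,
 * free the cluster.
 */
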
/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if ((m->m_flags & M_EXT) &&
			    m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
			}
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/* fall through */
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		atomic_subtract_long_nonlocked(
		    &mbstat[mycpu->gd_cpuid].m_mbufs, 1);
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate a new mbuf to prepend to the
 * chain and copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

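/*
 * Usage sketch for m_prepend(): reserving room for an 8-byte protocol
 * header (`hdr' and its size are hypothetical) and filling it in.  Note
 * the original chain has already been freed when NULL is returned:
 *
 *	m = m_prepend(m, 8, MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	bcopy(&hdr, mtod(m, void *), 8);
 */
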
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long_nonlocked(
		    &mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

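/*
 * Because m_copym() shares clusters, the returned chain must be treated
 * as read-only.  A sketch of the common retransmission-style pattern,
 * which hands off a shared copy and keeps the original (`transmit' is a
 * hypothetical consumer that takes ownership of its argument):
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	error = transmit(n);
 *
 * Use m_dup() when a writable copy is required.
 */
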
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

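/*
 * m_copydata() sketch: pulling a header out of a chain into properly
 * aligned local storage regardless of how the chain is fragmented
 * (`hdroff' and the struct tcphdr copy are illustrative only):
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, hdroff, sizeof(th), (caddr_t)&th);
 */
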
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) ||
			(remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
 * and NULL can be returned if MB_DONTWAIT is passed.
 *
 * Be careful to use cluster mbufs; a large mbuf chain converted to
 * non-cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN) {
		if ((m->m_flags & M_EXT) && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	} else {
		gsize = MLEN;
	}

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return(top);
nospace:
	*p = NULL;
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}

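/*
 * m_adj() sketch: trimming a 14-byte Ethernet header from the front and
 * a 4-byte CRC from the tail of a received packet (constants from
 * <net/ethernet.h>, shown only as an illustration):
 *
 *	m_adj(m, ETHER_HDR_LEN);
 *	m_adj(m, -ETHER_CRC_LEN);
 */
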
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

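/*
 * The classic m_pullup() pattern used by protocol input routines before
 * dereferencing a header via mtod() (a sketch; struct ip is merely
 * illustrative):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			(chain was freed on failure)
 *	ip = mtod(m, struct ip *);
 */
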
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

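/*
 * m_devpad() sketch: padding a short Ethernet frame up to the 60-byte
 * minimum (ETHER_MIN_LEN minus the 4-byte CRC, constants from
 * <net/ethernet.h>) before handing it to hardware that does not pad in
 * the MAC:
 *
 *	if (m->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN &&
 *	    m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN) != 0)
 *		goto drop;
 */
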
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(MB_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

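/*
 * m_apply() sketch: walking a chain segment by segment without
 * flattening it, e.g. to feed a hypothetical checksum callback
 * (`accumulate' is not a real function):
 *
 *	static int
 *	sum_segment(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sum = arg;
 *
 *		*sum = accumulate(*sum, data, len);
 *		return (0);		(non-zero aborts the walk)
 *	}
 *
 *	uint32_t sum = 0;
 *	error = m_apply(m, 0, m->m_pkthdr.len, sum_segment, &sum);
 */
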
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet-header mbuf is passed in, the original
 * mbuf (chain) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}

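/*
 * m_defrag() sketch: a transmit path coalescing an over-long chain when
 * the DMA engine only supports a limited number of segments
 * (`HW_MAX_SEGS' and `nsegs' are hypothetical driver values):
 *
 *	if (nsegs > HW_MAX_SEGS) {
 *		struct mbuf *d;
 *
 *		d = m_defrag(m, MB_DONTWAIT);
 *		if (d == NULL)
 *			goto drop;	(original chain is unchanged)
 *		m = d;
 *	}
 */
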
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}