/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);	/* XXX fixup? */
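
/*
 * Sketch: because the pool sizes above are boot-time tunables (read from
 * the kernel environment by TUNABLE_INT_DECL), an administrator would
 * typically resize them from the loader rather than recompile, roughly:
 *
 *	kern.ipc.nmbclusters="4096"
 *
 * in the loader's configuration.  The knob names come from the
 * TUNABLE_INT_DECL() calls above; the value shown is only an example.
 */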

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, M_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	register int nmb;
	int how;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map) (XXX which
	 * is dumb).  (However, you are not dead, as m_reclaim might
	 * still be able to free a substantial amount of space.)
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == M_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, M_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	&mclalloc_kp);
#endif
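
/*
 * Sketch of the calling convention, derived from the code above and
 * below: an interrupt-time caller may only poll, while a top-half
 * caller may sleep for the allocation:
 *
 *	s = splimp();
 *	if (m_mballoc(32, M_DONTWAIT) == 0)
 *		...no pages available right now; m_reclaim() may help later...
 *	splx(s);
 *
 * Note that on MCLBYTES > PAGE_SIZE machines an M_DONTWAIT m_clalloc()
 * merely posts the request to the mclalloc kthread and reports failure
 * for the current call; the clusters appear on the free list later.
 */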

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full) {
		mbstat.m_drops++;
		return (0);
	}

#if MCLBYTES > PAGE_SIZE
	if (how != M_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
		    ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how != M_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is M_NOWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		mbstat.m_drops++;
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
	/*
	 * Temporarily redefine m_retry so the MGET expansion below
	 * cannot recurse back into us; a failure then yields nil.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("Out of mbuf clusters");
	}
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == M_WAIT)
		m_reclaim();
	/* Same recursion guard as in m_retry above. */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL) {
		mbstat.m_wait++;
	} else {
		if (i == M_DONTWAIT)
			mbstat.m_drops++;
		else
			panic("Out of mbuf clusters");
	}
	return (m);
}

static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	register struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
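
/*
 * Example: m_prepend() is normally reached through the M_PREPEND()
 * macro, which falls back here only when the first mbuf lacks leading
 * space.  A protocol output routine typically does:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */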

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
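
/*
 * Example: a retransmitting protocol copies a window of its send queue
 * without duplicating cluster data (clusters are reference counted in
 * m_copym above), roughly:
 *
 *	n = m_copym(so->so_snd.sb_mb, off, (int)len, M_DONTWAIT);
 *	if (n == NULL)
 *		...back off and retry the transmission later...
 */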

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
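
/*
 * Example: a positive count trims bytes from the head of the chain, a
 * negative count from the tail, e.g. stripping the link-level header
 * and then a 4-byte trailing Ethernet FCS:
 *
 *	m_adj(m, sizeof(struct ether_header));
 *	m_adj(m, -4);
 */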

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
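
/*
 * Example: the classic m_pullup() idiom from protocol input paths makes
 * a fixed-size header contiguous before mtod() casts it:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == 0) {
 *		ipstat.ips_toosmall++;
 *		return;
 *	}
 *	ip = mtod(m, struct ip *);
 */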

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
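
/*
 * Example (hypothetical driver; sc->rx_buf and pktlen stand in for the
 * driver's own state): a receive interrupt handler packages a frame
 * sitting in adapter memory into a fresh chain, roughly:
 *
 *	m = m_devget(sc->rx_buf, pktlen, 0, &sc->arpcom.ac_if, NULL);
 *	if (m == NULL)
 *		return;
 *	ether_input(&sc->arpcom.ac_if, eh, m);
 *
 * m_copyback() above is the inverse direction: it lays a flat buffer
 * over an existing chain, growing it with M_DONTWAIT mbufs as needed,
 * so a failed allocation can leave the chain only partially updated.
 */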