/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	8.3 (Berkeley) 01/09/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>

extern vm_map_t mb_map;		/* kernel submap that backs mbuf clusters */
struct mbuf *mbutl;		/* base of mbuf/cluster storage (used by mtocl) */
char *mclrefcnt;		/* per-cluster reference counts, indexed by mtocl() */

/*
 * System startup: prime the cluster free list with an initial
 * allocation (at least 4K worth of clusters); panic if even
 * that cannot be satisfied.
 */
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();		/* free lists are protected at splimp */
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	static int logged;	/* so "mb_map full" is logged only once */
	register caddr_t p;
	register int i;
	int npg;

	npg = ncl * CLSIZE;
	/* kmem_malloc's last argument is "canwait", hence the negation */
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	/* convert the page-sized request into a count of MCLBYTES clusters */
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		/* push each new cluster onto the global free list */
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * Temporarily define m_retry away so the MGET expansion below
	 * cannot recurse back into this function if it fails again.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* same anti-recursion trick as in m_retry above */
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

/*
 * Walk every protocol in every domain and invoke its drain
 * routine (if any) to give back whatever memory it can spare.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */

/* Allocate a plain mbuf of the given type; NULL on failure. */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

/* Allocate an mbuf with a packet header; NULL on failure. */
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

/* Allocate a plain mbuf and zero its MLEN bytes of data area. */
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/* Free a single mbuf; returns its successor in the chain (may be NULL). */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/* Free an entire mbuf chain.  Safe to call with NULL. */
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);	/* assignment intended: walk to end of chain */
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* allocation failed: the whole original chain is dropped */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* migrate the packet header into the new lead mbuf */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);	/* put data at the tail of the data area */
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;		/* statistics: number of m_copym failures */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* skip over the leading off bytes of the source chain */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* running off the end is only legal for M_COPYALL */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* share the cluster rather than copying: bump refcount */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	/* advance to the mbuf containing offset off */
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");	/* chain shorter than off+len */
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;		/* find the tail of m */
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from the chain: from the head when req_len
 * is positive, from the tail when it is negative.  Updates any
 * packet-header length accordingly.
 */
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* consume this mbuf entirely */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* fast path: the last mbuf absorbs the whole trim */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* zero the lengths of all trailing mbufs (kept, not freed) */
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;		/* statistics: number of m_pullup failures */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;	/* only this much still needs pulling */
	} else {
		if (len > MHLEN)
			goto bad;	/* can never fit in one mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* move the packet header into the new lead mbuf */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* room remaining in m's internal data area */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy at least len bytes, up to max_protohdr if space
		 * permits (to amortize future pullups), bounded by what
		 * this source mbuf actually holds.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* source drained; free and advance */
	} while (len > 0 && n);
	if (len > 0) {
		/* chain was too short to satisfy the request */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* find the mbuf in which the split point falls */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);	/* chain shorter than len0 */
	remain = m->m_len - len;	/* bytes of m that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		/* the tail needs its own packet header */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;	/* cluster data can be shared below */
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* recurse to split m itself; n carries only the header */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* split falls exactly on an mbuf boundary: no copy needed */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* both halves reference the same cluster; bump refcount */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();		/* optional device-specific copy routine */
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;	/* end of the device buffer */
	if (off) {
		/*
		 * Nonzero off: skip past the leading header plus two
		 * u_shorts -- presumably trailer-protocol framing; the
		 * wrap below (cp = buf) picks the header back up.
		 * NOTE(review): confirm against the trailer encapsulation
		 * used by callers.
		 */
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* subsequent mbufs are plain (no packet header) */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* large chunk: try to use a cluster */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;	/* no cluster; fill what fits */
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap to pick up the skipped header */
	}
	return (top);
}