/*
 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	7.25 (Berkeley) 10/11/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>

extern vm_map_t mb_map;		/* kernel submap reserved for mbuf clusters */
struct mbuf *mbutl;		/* base of the mbuf cluster region */
char *mclrefcnt;		/* per-cluster reference counts, indexed via mtocl() */

/*
 * Initialize the mbuf allocator: prime the cluster free list with an
 * initial batch of clusters (at least 4K bytes worth).  Called once at
 * system startup; panics if the initial allocation cannot be satisfied.
 */
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	int npg, mbx;		/* XXX mbx appears unused */
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	/* kmem_malloc's last argument is "canwait", hence the inversion */
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		/* report map exhaustion only once to avoid log flooding */
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	/* convert page-cluster count to MCLBYTES-sized mbuf clusters */
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		/* push each cluster onto the head of the free list */
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGET's failure path normally calls m_retry(); temporarily
	 * redefine it to a null result so the macro expansion below
	 * cannot recurse back into this function.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * As in m_retry(): neuter the macro's recursive retry path
	 * while expanding MGETHDR here.
	 */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

/*
 * Ask every protocol with a drain routine to release whatever mbuf
 * space it can.  Runs at splimp so the free lists stay consistent
 * with interrupt-level allocators.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

/* Function form of MGETHDR: allocate an mbuf with a packet header. */
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

/* Allocate an mbuf and zero its (internal, MLEN-sized) data area. */
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/* Free a single mbuf; returns its successor in the chain (may be null). */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/* Free an entire mbuf chain.  A null argument is tolerated. */
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* allocation failed: the original chain is freed as well */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* move the packet header into the new leading mbuf */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;			/* statistic: number of failed m_copym calls */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* skip over the first off bytes of the source chain */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* ran off the end: only legal when copying "all" */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* share the cluster instead of copying the data */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	/* advance to the mbuf containing the starting offset */
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/* can't compact into a cluster mbuf or past the data area */
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from the chain at mp: from the head when req_len
 * is positive, from the tail when negative.  Updates m_pkthdr.len when
 * the chain carries a packet header.
 */
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* the trim is wholly within the last mbuf */
			m->m_len -= len;
			if ((mp = m)->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* zero the lengths of trailing mbufs (kept, not freed) */
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;			/* statistic: number of failed m_pullup calls */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* the new first mbuf inherits the packet header */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy at least len bytes, opportunistically up to
		 * max_protohdr, bounded by the room left in m and by
		 * what this source mbuf holds.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* chain was too short to supply len contiguous bytes */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* locate the mbuf in which the split point falls */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;	/* bytes of m that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		/* the tail becomes a new packet; apportion the header length */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* split falls exactly on an mbuf boundary: no copy needed */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* share the cluster; both halves reference the same buffer */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0;	/* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
/*
 * buf/totlen describe the device buffer.  ifp is recorded as the
 * receiving interface in the packet header.  copy, if non-null, is
 * used in place of bcopy to move each piece (e.g. for device memory
 * needing a special access routine).
 * NOTE(review): off0 != 0 appears to indicate a trailer-protocol
 * packet whose real header sits at offset off0 -- confirm against
 * the drivers that call this with a non-zero offset.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/* skip the two u_short type/length words at the offset */
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* subsequent mbufs carry no packet header */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* large piece: worth attaching a cluster */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		/* wrap around to the start of the buffer when we hit the end */
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}