/*
 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	7.22 (Berkeley) 02/05/92
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "vm/vm.h"

extern vm_map_t mb_map;		/* kernel submap backing mbuf clusters */
struct mbuf *mbutl;		/* base of the mbuf/cluster arena (used by mtocl) */
char *mclrefcnt;		/* per-cluster reference counts, indexed by mtocl() */

/*
 * Bootstrap the mbuf system: preallocate an initial batch of clusters
 * (at least 4096 bytes worth) at splimp; panic if even that first
 * allocation cannot be satisfied.
 */
mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 *
 * Returns 1 on success, 0 if the backing kmem_malloc() failed;
 * the "mb_map full" condition is logged only once per boot.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	/* kmem_malloc()'s third argument is "canwait", hence the negation */
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	/* convert the request from CLBYTES pages into MCLBYTES-sized clusters */
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGET's failure path expands to a call to m_retry(); temporarily
	 * redefine m_retry to a null constant so this retry cannot recurse.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* same recursion-avoidance trick as in m_retry() */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

/*
 * Run every protocol's drain routine to coax mbuf space back.
 * Runs at splimp since drain routines manipulate mbuf free lists.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

/*
 * As m_get(), but the mbuf data area is zeroed.
 */
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf; returns its successor in the chain (if any).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Free an entire mbuf chain.  NULL is accepted and ignored.
 */
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);	/* MFREE loads n from m->m_next before freeing m */
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 *
 * On allocation failure the whole chain is freed and NULL returned.
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* move the packet header into the new first mbuf */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);	/* place the len bytes at the tail of the data area */
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 *
 * Cluster (M_EXT) data is shared by bumping the cluster reference count
 * rather than copied.  MCFail counts failed copies.
 */
int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* skip over the first off bytes of the chain */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* share the cluster instead of copying its data */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 * Panics if the chain is shorter than off + len bytes.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 * Data from n is compacted into m's tail mbuf while it fits;
 * once it doesn't (or m's tail uses a cluster), the chains are linked.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from an mbuf chain: from the head when req_len
 * is positive, from the tail when negative.  Adjusts m_pkthdr.len
 * when the first mbuf carries a packet header.  Emptied mbufs get
 * m_len = 0 but are not freed.
 */
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			/*
			 * NOTE(review): this tests M_PKTHDR on the LAST mbuf
			 * (mp = m), not the chain head -- looks suspect; later
			 * BSD versions test the head's flag here.  Confirm
			 * before changing.
			 */
			if ((mp = m)->m_flags & M_PKTHDR)
				m->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* zero out the lengths of any trailing mbufs */
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* pull at least len, but up to max_protohdr, as space allows */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* source mbuf drained; free it */
	} while (len > 0 && n);
	if (len > 0) {
		/* chain was shorter than len bytes */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* find the mbuf in which the split point falls */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* recurse: with the pkthdr in n, split the data part */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* split point falls exactly on an mbuf boundary */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* share the cluster; both halves point into it */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 *
 * buf/totlen describe the device buffer; off0, when nonzero, presumably
 * marks trailer-protocol layout (header "off" bytes in, preceded by two
 * u_shorts) -- confirm against the drivers that pass off != 0.  "copy"
 * is an optional device-specific copy routine, else bcopy is used.
 * Returns the assembled chain, or NULL on allocation failure.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* first mbuf (the header) was allocated above */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;	/* cluster alloc failed; just fill the mbuf */
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap: data is split around the trailer */
	}
	return (top);
}