/*
 * Copyright (c) 1982, 1986, 1988, 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uipc_mbuf.c	7.21 (Berkeley) 12/19/91
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "map.h"
#define MBTYPES
#include "mbuf.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"
#include "vm/vm.h"

extern vm_map_t mb_map;		/* kernel submap backing mbuf clusters */
struct mbuf *mbutl;		/* base of the mbuf area (used by mtocl()) */
char *mclrefcnt;		/* per-cluster reference counts, indexed by mtocl() */

/*
 * Bootstrap the mbuf system: prime the cluster free list with an
 * initial batch of clusters.  Panics on failure, since networking
 * cannot run without mbuf clusters.
 */
mbinit()
{
	int s;

	/* Preallocate at least 4K bytes worth of clusters. */
#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();		/* m_clalloc must run at splimp */
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 *
 * Returns 1 on success, 0 if the backing map is exhausted;
 * the "mb_map full" condition is logged only once.
 */
/* ARGSUSED */
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	int npg, mbx;
	register caddr_t p;
	register int i;
	static int logged;	/* limits the LOG_ERR message to one occurrence */

	npg = ncl * CLSIZE;
	/* kmem_malloc's final argument is "canwait", hence the inversion. */
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
	if (p == NULL) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mb_map full\n");
		}
		return (0);
	}
	/* Convert the requested count into MCLBYTES-sized clusters. */
	ncl = ncl * CLBYTES / MCLBYTES;
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * The MGET expansion would itself fall back to m_retry();
	 * temporarily redefine the name to a null result so the
	 * retry below cannot recurse on a second failure.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/* Same anti-recursion trick as in m_retry() above. */
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}

/*
 * Invoke every protocol's drain routine to release cached
 * memory, and record the event in mbstat.m_drain.
 */
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */

/*
 * Allocate an mbuf of the given type; nowait chooses
 * M_WAIT/M_DONTWAIT semantics.  Returns NULL on failure.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

/*
 * As m_get(), but the new mbuf is initialized as a packet header.
 */
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

/*
 * As m_get(), but the mbuf's internal data area is zeroed.
 */
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf and return its successor, if any.
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Free an entire mbuf chain.  A NULL chain is tolerated.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);	/* MFREE leaves the successor in n */
	} while (m = n);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
*/
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* On allocation failure the entire original chain is freed. */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	/* Migrate the packet header, if any, to the new first mbuf. */
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;		/* counts m_copym() failures (allocation or empty result) */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Advance to the mbuf containing the starting offset. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is only legal for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			/* First mbuf of the copy inherits the packet header. */
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Cluster data is shared, not copied: bump the
			 * cluster's reference count and alias the buffer.
			 */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 * Panics if the chain is shorter than off + len.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	/* Advance to the mbuf containing the starting offset. */
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;	/* only the first mbuf has a nonzero offset */
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 * Data from n is compacted into m's mbufs where it fits;
 * otherwise the remainder of n is simply linked on.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes of data from the chain: from the head if
 * req_len is positive, from the tail if negative.
 * (req_len is an implicit int, K&R style.)
 */
m_adj(mp, req_len)
	struct mbuf *mp;
{
	register int len = req_len;
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* This mbuf is consumed entirely. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		/* Charge the packet header for what was actually trimmed. */
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.
Scan the mbuf chain, 382 * calculating its length and finding the last mbuf. 383 * If the adjustment only affects this mbuf, then just 384 * adjust and return. Otherwise, rescan and truncate 385 * after the remaining size. 386 */ 387 len = -len; 388 count = 0; 389 for (;;) { 390 count += m->m_len; 391 if (m->m_next == (struct mbuf *)0) 392 break; 393 m = m->m_next; 394 } 395 if (m->m_len >= len) { 396 m->m_len -= len; 397 if ((mp = m)->m_flags & M_PKTHDR) 398 m->m_pkthdr.len -= len; 399 return; 400 } 401 count -= len; 402 if (count < 0) 403 count = 0; 404 /* 405 * Correct length for chain is "count". 406 * Find the mbuf with last data, adjust its length, 407 * and toss data from remaining mbufs on chain. 408 */ 409 m = mp; 410 if (m->m_flags & M_PKTHDR) 411 m->m_pkthdr.len = count; 412 for (; m; m = m->m_next) { 413 if (m->m_len >= count) { 414 m->m_len = count; 415 break; 416 } 417 count -= m->m_len; 418 } 419 while (m = m->m_next) 420 m->m_len = 0; 421 } 422 } 423 424 /* 425 * Rearange an mbuf chain so that len bytes are contiguous 426 * and in the data area of an mbuf (so that mtod and dtom 427 * will work for a structure of size len). Returns the resulting 428 * mbuf chain on success, frees it and returns null on failure. 429 * If there is room, it will add up to max_protohdr-len extra bytes to the 430 * contiguous region in an attempt to avoid being called next time. 431 */ 432 int MPFail; 433 434 struct mbuf * 435 m_pullup(n, len) 436 register struct mbuf *n; 437 int len; 438 { 439 register struct mbuf *m; 440 register int count; 441 int space; 442 443 /* 444 * If first mbuf has no cluster, and has room for len bytes 445 * without shifting current data, pullup into it, 446 * otherwise allocate a new mbuf to prepend to the chain. 
447 */ 448 if ((n->m_flags & M_EXT) == 0 && 449 n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 450 if (n->m_len >= len) 451 return (n); 452 m = n; 453 n = n->m_next; 454 len -= m->m_len; 455 } else { 456 if (len > MHLEN) 457 goto bad; 458 MGET(m, M_DONTWAIT, n->m_type); 459 if (m == 0) 460 goto bad; 461 m->m_len = 0; 462 if (n->m_flags & M_PKTHDR) { 463 M_COPY_PKTHDR(m, n); 464 n->m_flags &= ~M_PKTHDR; 465 } 466 } 467 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 468 do { 469 count = min(min(max(len, max_protohdr), space), n->m_len); 470 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 471 (unsigned)count); 472 len -= count; 473 m->m_len += count; 474 n->m_len -= count; 475 space -= count; 476 if (n->m_len) 477 n->m_data += count; 478 else 479 n = m_free(n); 480 } while (len > 0 && n); 481 if (len > 0) { 482 (void) m_free(m); 483 goto bad; 484 } 485 m->m_next = n; 486 return (m); 487 bad: 488 m_freem(n); 489 MPFail++; 490 return (0); 491 } 492 493 /* 494 * Partition an mbuf chain in two pieces, returning the tail -- 495 * all but the first len0 bytes. In case of failure, it returns NULL and 496 * attempts to restore the chain to its original state. 
*/
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);	/* chain shorter than len0 */
	remain = m->m_len - len;	/* bytes of m that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes a new packet; build it a header. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* Recurse: split within m itself; the pkthdr-less
			 * case below will handle the actual byte split. */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster with the tail; bump its refcount. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 *
 * buf/totlen describe the device buffer; off0, when nonzero,
 * apparently marks a trailer-style packet whose header sits at
 * offset off0 (NOTE(review): the 2*sizeof(u_short) adjustment and
 * the cp wrap-around at epkt assume that layout -- confirm against
 * the callers).  copy, if non-null, replaces bcopy for the
 * device-to-memory transfer.  Returns the assembled chain, or
 * NULL if an mbuf could not be allocated.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	/* First mbuf carries the packet header. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs are plain data mbufs. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Large chunk: try to attach a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;	/* no cluster; fill the mbuf */
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				/* Leave room for a link-level header prepend. */
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap back for trailer-format packets */
	}
	return (top);
}