1 /*
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)uipc_mbuf.c 8.4 (Berkeley) 02/14/95
8 */
9
10 #include <sys/param.h>
11 #include <sys/systm.h>
12 #include <sys/proc.h>
13 #include <sys/malloc.h>
14 #include <sys/map.h>
15 #define MBTYPES
16 #include <sys/mbuf.h>
17 #include <sys/kernel.h>
18 #include <sys/syslog.h>
19 #include <sys/domain.h>
20 #include <sys/protosw.h>
21
22 #include <vm/vm.h>
23
24 extern vm_map_t mb_map;
25 struct mbuf *mbutl;
26 char *mclrefcnt;
27
28 void
mbinit()29 mbinit()
30 {
31 int s;
32
33 s = splimp();
34 if (m_clalloc(max(4096/CLBYTES, 1), M_DONTWAIT) == 0)
35 goto bad;
36 splx(s);
37 return;
38 bad:
39 panic("mbinit");
40 }
41
42 /*
43 * Allocate some number of mbuf clusters
44 * and place on cluster free list.
45 * Must be called at splimp.
46 */
47 /* ARGSUSED */
48 int
m_clalloc(ncl,nowait)49 m_clalloc(ncl, nowait)
50 register int ncl;
51 int nowait;
52 {
53 static int logged;
54 register caddr_t p;
55 register int i;
56 int npg;
57
58 npg = ncl * CLSIZE;
59 p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait);
60 if (p == NULL) {
61 if (logged == 0) {
62 logged++;
63 log(LOG_ERR, "mb_map full\n");
64 }
65 return (0);
66 }
67 ncl = ncl * CLBYTES / MCLBYTES;
68 for (i = 0; i < ncl; i++) {
69 ((union mcluster *)p)->mcl_next = mclfree;
70 mclfree = (union mcluster *)p;
71 p += MCLBYTES;
72 mbstat.m_clfree++;
73 }
74 mbstat.m_clusters += ncl;
75 return (1);
76 }
77
78 /*
 * When MGET fails, ask protocols to free space when short of memory,
80 * then re-attempt to allocate an mbuf.
81 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	/* Ask every protocol to release whatever memory it can spare. */
	m_reclaim();
	/*
	 * MGET's failure path normally calls m_retry() itself; temporarily
	 * redefine m_retry so the expansion below cannot recurse -- a
	 * second failure simply yields a nil mbuf.
	 */
#define m_retry(i, t) (struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	return (m);
}
94
95 /*
96 * As above; retry an MGETHDR.
97 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	/* Reclaim protocol-held memory, then try the allocation again. */
	m_reclaim();
	/*
	 * Prevent recursion: MGETHDR's failure path would call
	 * m_retryhdr() again; make that expand to a nil mbuf instead.
	 */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	return (m);
}
110
111 void
m_reclaim()112 m_reclaim()
113 {
114 register struct domain *dp;
115 register struct protosw *pr;
116 int s = splimp();
117
118 for (dp = domains; dp; dp = dp->dom_next)
119 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
120 if (pr->pr_drain)
121 (*pr->pr_drain)();
122 splx(s);
123 mbstat.m_drain++;
124 }
125
126 /*
127 * Space allocation routines.
128 * These are also available as macros
129 * for critical paths.
130 */
/*
 * Function form of the MGET macro: allocate one mbuf of the given
 * type.  Returns nil on failure (subject to nowait semantics).
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *mb;

	MGET(mb, nowait, type);
	return (mb);
}
140
/*
 * Function form of the MGETHDR macro: allocate one packet-header
 * mbuf of the given type.  Returns nil on failure.
 */
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *mb;

	MGETHDR(mb, nowait, type);
	return (mb);
}
150
151 struct mbuf *
m_getclr(nowait,type)152 m_getclr(nowait, type)
153 int nowait, type;
154 {
155 register struct mbuf *m;
156
157 MGET(m, nowait, type);
158 if (m == 0)
159 return (0);
160 bzero(mtod(m, caddr_t), MLEN);
161 return (m);
162 }
163
/*
 * Function form of the MFREE macro: free a single mbuf and
 * return its successor in the chain (possibly nil).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *nextp;

	MFREE(m, nextp);
	return (nextp);
}
173
174 void
m_freem(m)175 m_freem(m)
176 register struct mbuf *m;
177 {
178 register struct mbuf *n;
179
180 if (m == NULL)
181 return;
182 do {
183 MFREE(m, n);
184 } while (m = n);
185 }
186
187 /*
188 * Mbuffer utility routines.
189 */
190
191 /*
192 * Lesser-used path for M_PREPEND:
193 * allocate new mbuf to prepend to chain,
194 * copy junk along.
195 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		/* Allocation failed: the entire original chain is freed. */
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	/* Move any packet header from the old chain head to the new one. */
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	/* If the request fits in a header mbuf, align it toward the tail. */
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
219
220 /*
221 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
222 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
223 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
224 */
225 int MCFail;
226
struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	/* Copying from the front of a packet also copies its header. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip the leading "off" bytes of the source chain. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Running off the end is legal only for M_COPYALL. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Cluster data is shared by reference, not copied:
			 * point at it and bump the cluster refcount.
			 */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	/* MCFail counts copies that failed for lack of mbufs. */
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
293
294 /*
295 * Copy data from an mbuf chain starting "off" bytes from the beginning,
296 * continuing for "len" bytes, into the indicated buffer.
297 */
298 void
m_copydata(m,off,len,cp)299 m_copydata(m, off, len, cp)
300 register struct mbuf *m;
301 register int off;
302 register int len;
303 caddr_t cp;
304 {
305 register unsigned count;
306
307 if (off < 0 || len < 0)
308 panic("m_copydata");
309 while (off > 0) {
310 if (m == 0)
311 panic("m_copydata");
312 if (off < m->m_len)
313 break;
314 off -= m->m_len;
315 m = m->m_next;
316 }
317 while (len > 0) {
318 if (m == 0)
319 panic("m_copydata");
320 count = min(m->m_len - off, len);
321 bcopy(mtod(m, caddr_t) + off, cp, count);
322 len -= count;
323 cp += count;
324 off = 0;
325 m = m->m_next;
326 }
327 }
328
329 /*
330 * Concatenate mbuf chain n to m.
331 * Both chains must be of the same type (e.g. MT_DATA).
332 * Any m_pkthdr is not updated.
333 */
334 void
m_cat(m,n)335 m_cat(m, n)
336 register struct mbuf *m, *n;
337 {
338 while (m->m_next)
339 m = m->m_next;
340 while (n) {
341 if (m->m_flags & M_EXT ||
342 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
343 /* just join the two chains */
344 m->m_next = n;
345 return;
346 }
347 /* splat the data from one into the other */
348 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
349 (u_int)n->m_len);
350 m->m_len += n->m_len;
351 n = m_free(n);
352 }
353 }
354
355 void
m_adj(mp,req_len)356 m_adj(mp, req_len)
357 struct mbuf *mp;
358 int req_len;
359 {
360 register int len = req_len;
361 register struct mbuf *m;
362 register count;
363
364 if ((m = mp) == NULL)
365 return;
366 if (len >= 0) {
367 /*
368 * Trim from head.
369 */
370 while (m != NULL && len > 0) {
371 if (m->m_len <= len) {
372 len -= m->m_len;
373 m->m_len = 0;
374 m = m->m_next;
375 } else {
376 m->m_len -= len;
377 m->m_data += len;
378 len = 0;
379 }
380 }
381 m = mp;
382 if (mp->m_flags & M_PKTHDR)
383 m->m_pkthdr.len -= (req_len - len);
384 } else {
385 /*
386 * Trim from tail. Scan the mbuf chain,
387 * calculating its length and finding the last mbuf.
388 * If the adjustment only affects this mbuf, then just
389 * adjust and return. Otherwise, rescan and truncate
390 * after the remaining size.
391 */
392 len = -len;
393 count = 0;
394 for (;;) {
395 count += m->m_len;
396 if (m->m_next == (struct mbuf *)0)
397 break;
398 m = m->m_next;
399 }
400 if (m->m_len >= len) {
401 m->m_len -= len;
402 if (mp->m_flags & M_PKTHDR)
403 mp->m_pkthdr.len -= len;
404 return;
405 }
406 count -= len;
407 if (count < 0)
408 count = 0;
409 /*
410 * Correct length for chain is "count".
411 * Find the mbuf with last data, adjust its length,
412 * and toss data from remaining mbufs on chain.
413 */
414 m = mp;
415 if (m->m_flags & M_PKTHDR)
416 m->m_pkthdr.len = count;
417 for (; m; m = m->m_next) {
418 if (m->m_len >= count) {
419 m->m_len = count;
420 break;
421 }
422 count -= m->m_len;
423 }
424 while (m = m->m_next)
425 m->m_len = 0;
426 }
427 }
428
429 /*
 * Rearrange an mbuf chain so that len bytes are contiguous
431 * and in the data area of an mbuf (so that mtod and dtom
432 * will work for a structure of size len). Returns the resulting
433 * mbuf chain on success, frees it and returns null on failure.
434 * If there is room, it will add up to max_protohdr-len extra bytes to the
435 * contiguous region in an attempt to avoid being called next time.
436 */
437 int MPFail;
438
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* More than fits in one header mbuf can never succeed. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* Room remaining in the destination mbuf's data area. */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Pull extra bytes (up to max_protohdr) if space allows. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain was shorter than len: give up. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
497
498 /*
499 * Partition an mbuf chain in two pieces, returning the tail --
500 * all but the first len0 bytes. In case of failure, it returns NULL and
501 * attempts to restore the chain to its original state.
502 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Locate the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes a new packet: give it its own header. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster rather than copying its data. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
562 /*
563 * Routine to copy from device local memory into mbufs.
564 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy)();
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	/* First mbuf carries the packet header. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs; the header mbuf came from above. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Large chunk: try to attach a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		/* Use the caller's copy routine if given, else bcopy. */
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		/* Wrap to the start of the device buffer if we hit its end. */
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
635