xref: /freebsd/sys/kern/uipc_mbuf.c (revision cb7545a9)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
34  * $Id: uipc_mbuf.c,v 1.20 1996/05/08 19:38:14 wollman Exp $
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc.h>
40 #include <sys/malloc.h>
41 #define MBTYPES
42 #include <sys/mbuf.h>
43 #include <sys/kernel.h>
44 #include <sys/syslog.h>
45 #include <sys/domain.h>
46 #include <sys/protosw.h>
47 
48 #include <vm/vm.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_extern.h>
52 
53 static void mbinit __P((void *));
54 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
55 
56 struct mbuf *mbutl;
57 char	*mclrefcnt;
58 struct mbstat mbstat;
59 struct mbuf *mmbfree;
60 union mcluster *mclfree;
61 int	max_linkhdr;
62 int	max_protohdr;
63 int	max_hdr;
64 int	max_datalen;
65 
66 static void	m_reclaim __P((void));
67 
68 /* ARGSUSED*/
69 static void
70 mbinit(dummy)
71 	void *dummy;
72 {
73 	int s;
74 
75 #define NMB_INIT	16
76 #if MCLBYTES < 4096
77 #define NCL_INIT	(4096/MCLBYTES)
78 #else
79 #define NCL_INIT	1
80 #endif
81 
82 	mmbfree = NULL; mclfree = NULL;
83 	s = splimp();
84 	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
85 		goto bad;
86 	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
87 		goto bad;
88 	splx(s);
89 	return;
90 bad:
91 	panic("mbinit");
92 }
93 
94 /*
95  * Allocate at least nmb mbufs and place on mbuf free list.
96  * Must be called at splimp.
97  */
98 /* ARGSUSED */
99 int
100 m_mballoc(nmb, nowait)
101 	register int nmb;
102 	int nowait;
103 {
104 	register caddr_t p;
105 	register int i;
106 	int nbytes;
107 
108 	/* Once we run out of map space, it will be impossible to get
109 	 * any more (nothing is ever freed back to the map) (XXX which
110 	 * is dumb). (however you are not dead as m_reclaim might
111 	 * still be able to free a substantial amount of space).
112 	 */
113 	if (mb_map_full)
114 		return (0);
115 
116 	nbytes = round_page(nmb * MSIZE);
117 	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
118 	/*
119 	 * Either the map is now full, or this is nowait and there
120 	 * are no pages left.
121 	 */
122 	if (p == NULL)
123 		return (0);
124 
125 	nmb = nbytes / MSIZE;
126 	for (i = 0; i < nmb; i++) {
127 		((struct mbuf *)p)->m_next = mmbfree;
128 		mmbfree = (struct mbuf *)p;
129 		p += MSIZE;
130 	}
131 	mbstat.m_mbufs += nmb;
132 	return (1);
133 }
134 
135 /*
136  * Allocate some number of mbuf clusters
137  * and place on cluster free list.
138  * Must be called at splimp.
139  */
140 /* ARGSUSED */
141 int
142 m_clalloc(ncl, nowait)
143 	register int ncl;
144 	int nowait;
145 {
146 	register caddr_t p;
147 	register int i;
148 	int npg;
149 
150 	/*
151 	 * Once we run out of map space, it will be impossible
152 	 * to get any more (nothing is ever freed back to the
153 	 * map).
154 	 */
155 	if (mcl_map_full)
156 		return (0);
157 
158 	npg = ncl;
159 	p = (caddr_t)kmem_malloc(mcl_map, ctob(npg),
160 				 nowait ? M_NOWAIT : M_WAITOK);
161 	/*
162 	 * Either the map is now full, or this is nowait and there
163 	 * are no pages left.
164 	 */
165 	if (p == NULL)
166 		return (0);
167 
168 	ncl = ncl * PAGE_SIZE / MCLBYTES;
169 	for (i = 0; i < ncl; i++) {
170 		((union mcluster *)p)->mcl_next = mclfree;
171 		mclfree = (union mcluster *)p;
172 		p += MCLBYTES;
173 		mbstat.m_clfree++;
174 	}
175 	mbstat.m_clusters += ncl;
176 	return (1);
177 }
178 
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGET would normally expand to call m_retry() again on failure;
	 * temporarily define the name away so that this expansion yields
	 * NULL instead of recursing.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;	/* reclaim made the retry succeed */
	else
		mbstat.m_drops++;	/* still out of mbufs */
	return (m);
}
199 
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
	/*
	 * MGETHDR would recurse into m_retryhdr() on failure; hide the
	 * name temporarily so the expansion yields NULL instead.
	 */
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;	/* reclaim made the retry succeed */
	else
		mbstat.m_drops++;	/* still out of mbufs */
	return (m);
}
219 
/*
 * Ask every protocol in every domain to release whatever mbuf space
 * it can spare, by invoking its pr_drain hook.  Runs at splimp so
 * the protocol lists are not modified underneath us.
 */
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;	/* count reclaim attempts */
}
234 
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *mp;

	/* Function form of the MGET macro. */
	MGET(mp, nowait, type);
	return (mp);
}
249 
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *mp;

	/* Function form of the MGETHDR macro. */
	MGETHDR(mp, nowait, type);
	return (mp);
}
259 
260 struct mbuf *
261 m_getclr(nowait, type)
262 	int nowait, type;
263 {
264 	register struct mbuf *m;
265 
266 	MGET(m, nowait, type);
267 	if (m == 0)
268 		return (0);
269 	bzero(mtod(m, caddr_t), MLEN);
270 	return (m);
271 }
272 
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *next;

	/* Function form of the MFREE macro; returns the successor mbuf. */
	MFREE(m, next);
	return (next);
}
282 
283 void
284 m_freem(m)
285 	register struct mbuf *m;
286 {
287 	register struct mbuf *n;
288 
289 	if (m == NULL)
290 		return;
291 	do {
292 		MFREE(m, n);
293 		m = n;
294 	} while (m);
295 }
296 
297 /*
298  * Mbuffer utility routines.
299  */
300 
301 /*
302  * Lesser-used path for M_PREPEND:
303  * allocate new mbuf to prepend to chain,
304  * copy junk along.
305  */
306 struct mbuf *
307 m_prepend(m, len, how)
308 	register struct mbuf *m;
309 	int len, how;
310 {
311 	struct mbuf *mn;
312 
313 	MGET(mn, how, m->m_type);
314 	if (mn == (struct mbuf *)NULL) {
315 		m_freem(m);
316 		return ((struct mbuf *)NULL);
317 	}
318 	if (m->m_flags & M_PKTHDR) {
319 		M_COPY_PKTHDR(mn, m);
320 		m->m_flags &= ~M_PKTHDR;
321 	}
322 	mn->m_next = m;
323 	m = mn;
324 	if (len < MHLEN)
325 		MH_ALIGN(m, len);
326 	m->m_len = len;
327 	return (m);
328 }
329 
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
static int MCFail;		/* statistics: failed copy attempts */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	/* Copying from the very front of a packet: carry the header over. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Advance to the mbuf containing offset off0. */
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			/* Ran off the end: only legal when copying it all. */
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			/* First mbuf of the copy gets the packet header. */
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster by reference instead of copying. */
			n->m_data = m->m_data + off;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	/* Allocation failed mid-copy: discard the partial chain. */
	m_freem(top);
	MCFail++;
	return (0);
}
403 
404 /*
405  * Copy an entire packet, including header (which must be present).
406  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
407  */
408 struct mbuf *
409 m_copypacket(m, how)
410 	struct mbuf *m;
411 	int how;
412 {
413 	struct mbuf *top, *n, *o;
414 
415 	MGET(n, how, m->m_type);
416 	top = n;
417 	if (!n)
418 		goto nospace;
419 
420 	M_COPY_PKTHDR(n, m);
421 	n->m_len = m->m_len;
422 	if (m->m_flags & M_EXT) {
423 		n->m_data = m->m_data;
424 		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
425 		n->m_ext = m->m_ext;
426 		n->m_flags |= M_EXT;
427 	} else {
428 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
429 	}
430 
431 	m = m->m_next;
432 	while (m) {
433 		MGET(o, how, m->m_type);
434 		if (!o)
435 			goto nospace;
436 
437 		n->m_next = o;
438 		n = n->m_next;
439 
440 		n->m_len = m->m_len;
441 		if (m->m_flags & M_EXT) {
442 			n->m_data = m->m_data;
443 			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
444 			n->m_ext = m->m_ext;
445 			n->m_flags |= M_EXT;
446 		} else {
447 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
448 		}
449 
450 		m = m->m_next;
451 	}
452 	return top;
453 nospace:
454 	m_freem(top);
455 	MCFail++;
456 	return 0;
457 }
458 
459 /*
460  * Copy data from an mbuf chain starting "off" bytes from the beginning,
461  * continuing for "len" bytes, into the indicated buffer.
462  */
463 void
464 m_copydata(m, off, len, cp)
465 	register struct mbuf *m;
466 	register int off;
467 	register int len;
468 	caddr_t cp;
469 {
470 	register unsigned count;
471 
472 	if (off < 0 || len < 0)
473 		panic("m_copydata");
474 	while (off > 0) {
475 		if (m == 0)
476 			panic("m_copydata");
477 		if (off < m->m_len)
478 			break;
479 		off -= m->m_len;
480 		m = m->m_next;
481 	}
482 	while (len > 0) {
483 		if (m == 0)
484 			panic("m_copydata");
485 		count = min(m->m_len - off, len);
486 		bcopy(mtod(m, caddr_t) + off, cp, count);
487 		len -= count;
488 		cp += count;
489 		off = 0;
490 		m = m->m_next;
491 	}
492 }
493 
494 /*
495  * Concatenate mbuf chain n to m.
496  * Both chains must be of the same type (e.g. MT_DATA).
497  * Any m_pkthdr is not updated.
498  */
499 void
500 m_cat(m, n)
501 	register struct mbuf *m, *n;
502 {
503 	while (m->m_next)
504 		m = m->m_next;
505 	while (n) {
506 		if (m->m_flags & M_EXT ||
507 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
508 			/* just join the two chains */
509 			m->m_next = n;
510 			return;
511 		}
512 		/* splat the data from one into the other */
513 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
514 		    (u_int)n->m_len);
515 		m->m_len += n->m_len;
516 		n = m_free(n);
517 	}
518 }
519 
520 void
521 m_adj(mp, req_len)
522 	struct mbuf *mp;
523 	int req_len;
524 {
525 	register int len = req_len;
526 	register struct mbuf *m;
527 	register count;
528 
529 	if ((m = mp) == NULL)
530 		return;
531 	if (len >= 0) {
532 		/*
533 		 * Trim from head.
534 		 */
535 		while (m != NULL && len > 0) {
536 			if (m->m_len <= len) {
537 				len -= m->m_len;
538 				m->m_len = 0;
539 				m = m->m_next;
540 			} else {
541 				m->m_len -= len;
542 				m->m_data += len;
543 				len = 0;
544 			}
545 		}
546 		m = mp;
547 		if (mp->m_flags & M_PKTHDR)
548 			m->m_pkthdr.len -= (req_len - len);
549 	} else {
550 		/*
551 		 * Trim from tail.  Scan the mbuf chain,
552 		 * calculating its length and finding the last mbuf.
553 		 * If the adjustment only affects this mbuf, then just
554 		 * adjust and return.  Otherwise, rescan and truncate
555 		 * after the remaining size.
556 		 */
557 		len = -len;
558 		count = 0;
559 		for (;;) {
560 			count += m->m_len;
561 			if (m->m_next == (struct mbuf *)0)
562 				break;
563 			m = m->m_next;
564 		}
565 		if (m->m_len >= len) {
566 			m->m_len -= len;
567 			if (mp->m_flags & M_PKTHDR)
568 				mp->m_pkthdr.len -= len;
569 			return;
570 		}
571 		count -= len;
572 		if (count < 0)
573 			count = 0;
574 		/*
575 		 * Correct length for chain is "count".
576 		 * Find the mbuf with last data, adjust its length,
577 		 * and toss data from remaining mbufs on chain.
578 		 */
579 		m = mp;
580 		if (m->m_flags & M_PKTHDR)
581 			m->m_pkthdr.len = count;
582 		for (; m; m = m->m_next) {
583 			if (m->m_len >= count) {
584 				m->m_len = count;
585 				break;
586 			}
587 			count -= m->m_len;
588 		}
589 		while (m->m_next)
590 			(m = m->m_next) ->m_len = 0;
591 	}
592 }
593 
/*
 * Rearange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
static int MPFail;		/* statistics: failed m_pullup calls */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;	/* can never fit in one mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* The new first mbuf takes over the packet header. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	/* Bytes still free at the end of m's data area. */
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy as much as fits and as much as n holds; aim for
		 * max_protohdr bytes to avoid another call soon.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* drained: free and advance */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain too short to gather len contiguous bytes. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
662 
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf in which the split point falls. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);	/* chain shorter than len0 */
	/* Bytes of m that belong to the tail chain. */
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes a packet of its own: build its header. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* Recurse to split off m's surplus into n's tail. */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: no copy needed. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster between head and tail. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Builds a packet-header chain of up to "totlen" bytes read from
 * "buf" via the optional "copy" routine (bcopy when NULL); the
 * device buffer is treated as circular of size totlen.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/* NOTE(review): the two u_shorts skipped here look like a
		 * trailer-protocol header -- confirm against the callers. */
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs carry data only, no header. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		/* Never read past the end of the circular device buffer. */
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Big chunk: try to attach a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap around the device buffer */
	}
	return (top);
}
796 
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 *
 * NOTE(review): extension is best-effort -- when an mbuf cannot be
 * allocated the copy stops early and the write is silently truncated.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	/* Walk (and if needed grow) the chain out to offset "off". */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			/* Gap before "off": fill with a zeroed mbuf. */
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			/* Extend the chain to hold the remaining bytes. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
849