xref: /freebsd/sys/kern/uipc_mbuf.c (revision 3157ba21)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_param.h"
36 #include "opt_mbuf_stress_test.h"
37 #include "opt_mbuf_profiling.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/sysctl.h>
47 #include <sys/domain.h>
48 #include <sys/protosw.h>
49 #include <sys/uio.h>
50 
51 int	max_linkhdr;
52 int	max_protohdr;
53 int	max_hdr;
54 int	max_datalen;
55 #ifdef MBUF_STRESS_TEST
56 int	m_defragpackets;
57 int	m_defragbytes;
58 int	m_defraguseless;
59 int	m_defragfailure;
60 int	m_defragrandomfailures;
61 #endif
62 
63 /*
64  * sysctl(8) exported objects
65  */
66 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
67 	   &max_linkhdr, 0, "Size of largest link layer header");
68 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
69 	   &max_protohdr, 0, "Size of largest protocol layer header");
70 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
71 	   &max_hdr, 0, "Size of largest link plus protocol header");
72 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
73 	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
74 #ifdef MBUF_STRESS_TEST
75 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
76 	   &m_defragpackets, 0, "");
77 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
78 	   &m_defragbytes, 0, "");
79 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
80 	   &m_defraguseless, 0, "");
81 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
82 	   &m_defragfailure, 0, "");
83 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
84 	   &m_defragrandomfailures, 0, "");
85 #endif
86 
87 /*
88  * Allocate a given length worth of mbufs and/or clusters (whatever fits
89  * best) and return a pointer to the top of the allocated chain.  If an
90  * existing mbuf chain is provided, then we will append the new chain
91  * to the existing one but still return the top of the newly allocated
92  * chain.
93  */
94 struct mbuf *
95 m_getm2(struct mbuf *m, int len, int how, short type, int flags)
96 {
97 	struct mbuf *mb, *nm = NULL, *mtail = NULL;
98 
99 	KASSERT(len >= 0, ("%s: len is < 0", __func__));
100 
101 	/* Validate flags. */
102 	flags &= (M_PKTHDR | M_EOR);
103 
104 	/* Packet header mbuf must be first in chain. */
105 	if ((flags & M_PKTHDR) && m != NULL)
106 		flags &= ~M_PKTHDR;
107 
108 	/* Loop and append maximum sized mbufs to the chain tail. */
109 	while (len > 0) {
110 		if (len > MCLBYTES)
111 			mb = m_getjcl(how, type, (flags & M_PKTHDR),
112 			    MJUMPAGESIZE);
113 		else if (len >= MINCLSIZE)
114 			mb = m_getcl(how, type, (flags & M_PKTHDR));
115 		else if (flags & M_PKTHDR)
116 			mb = m_gethdr(how, type);
117 		else
118 			mb = m_get(how, type);
119 
120 		/* Fail the whole operation if one mbuf can't be allocated. */
121 		if (mb == NULL) {
122 			if (nm != NULL)
123 				m_freem(nm);
124 			return (NULL);
125 		}
126 
127 		/* Bookkeeping. */
128 		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
129 			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
130 		if (mtail != NULL)
131 			mtail->m_next = mb;
132 		else
133 			nm = mb;
134 		mtail = mb;
135 		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
136 	}
137 	if (flags & M_EOR)
138 		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
139 
140 	/* If mbuf was supplied, append new chain to the end of it. */
141 	if (m != NULL) {
142 		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
143 			;
144 		mtail->m_next = nm;
145 		mtail->m_flags &= ~M_EOR;
146 	} else
147 		m = nm;
148 
149 	return (m);
150 }
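
/*
 * Illustrative usage sketch (not compiled): a typical caller asks for
 * enough mbufs/clusters to hold "len" bytes in one call and bails out on
 * allocation failure.  The surrounding function, the "len" variable and
 * the ENOBUFS return are assumed.
 *
 *	struct mbuf *m;
 *
 *	m = m_getm2(NULL, len, M_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */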
151 
152 /*
153  * Free an entire chain of mbufs and associated external buffers, if
154  * applicable.
155  */
156 void
157 m_freem(struct mbuf *mb)
158 {
159 
160 	while (mb != NULL)
161 		mb = m_free(mb);
162 }
163 
164 /*-
165  * Configure a provided mbuf to refer to the provided external storage
166  * buffer and set up a reference count for said buffer.  If the setting
167  * up of the reference count fails, the M_EXT bit will not be set.  If
168  * successful, the M_EXT bit is set in the mbuf's flags.
169  *
170  * Arguments:
171  *    mb     The existing mbuf to which to attach the provided buffer.
172  *    buf    The address of the provided external storage buffer.
173  *    size   The size of the provided buffer.
174  *    freef  A pointer to a routine that is responsible for freeing the
175  *           provided external storage buffer.
176  *    arg1   First argument to be passed to the freef routine (may be NULL).
177  *    arg2   Second argument to be passed to the freef routine (may be NULL).
178  *    flags  Any other flags to be passed to the provided mbuf.
179  *    type   The type that the external storage buffer should be
180  *           labeled with.
181  *
182  * Returns:
183  *    Nothing.
184  */
185 void
186 m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
187     void (*freef)(void *, void *), void *arg1, void *arg2, int flags, int type)
188 {
189 	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
190 
191 	if (type != EXT_EXTREF)
192 		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
193 	if (mb->m_ext.ref_cnt != NULL) {
194 		*(mb->m_ext.ref_cnt) = 1;
195 		mb->m_flags |= (M_EXT | flags);
196 		mb->m_ext.ext_buf = buf;
197 		mb->m_data = mb->m_ext.ext_buf;
198 		mb->m_ext.ext_size = size;
199 		mb->m_ext.ext_free = freef;
200 		mb->m_ext.ext_arg1 = arg1;
201 		mb->m_ext.ext_arg2 = arg2;
202 		mb->m_ext.ext_type = type;
203 	}
204 }
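
/*
 * Illustrative usage sketch (not compiled): how a hypothetical driver
 * might hang its own buffer off an mbuf.  "my_buf", "MY_BUFSIZE" and
 * "my_free" are assumed names; my_free() must match the
 * void (*)(void *, void *) signature expected above.
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_extadd(m, (caddr_t)my_buf, MY_BUFSIZE, my_free,
 *		    my_buf, NULL, 0, EXT_NET_DRV);
 *		if ((m->m_flags & M_EXT) == 0)
 *			m_free(m);
 *	}
 */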
205 
206 /*
207  * Non-directly-exported function to clean up after mbufs with M_EXT
208  * storage attached to them once the last reference to the storage is dropped.
209  */
210 void
211 mb_free_ext(struct mbuf *m)
212 {
213 	int skipmbuf;
214 
215 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
216 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
217 
218 
219 	/*
220 	 * check if the header is embedded in the cluster
221 	 */
222 	skipmbuf = (m->m_flags & M_NOFREE);
223 
224 	/* Free attached storage if this mbuf is the only reference to it. */
225 	if (*(m->m_ext.ref_cnt) == 1 ||
226 	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
227 		switch (m->m_ext.ext_type) {
228 		case EXT_PACKET:	/* The packet zone is special. */
229 			if (*(m->m_ext.ref_cnt) == 0)
230 				*(m->m_ext.ref_cnt) = 1;
231 			uma_zfree(zone_pack, m);
232 			return;		/* Job done. */
233 		case EXT_CLUSTER:
234 			uma_zfree(zone_clust, m->m_ext.ext_buf);
235 			break;
236 		case EXT_JUMBOP:
237 			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
238 			break;
239 		case EXT_JUMBO9:
240 			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
241 			break;
242 		case EXT_JUMBO16:
243 			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
244 			break;
245 		case EXT_SFBUF:
246 		case EXT_NET_DRV:
247 		case EXT_MOD_TYPE:
248 		case EXT_DISPOSABLE:
249 			*(m->m_ext.ref_cnt) = 0;
250 			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
251 				m->m_ext.ref_cnt));
252 			/* FALLTHROUGH */
253 		case EXT_EXTREF:
254 			KASSERT(m->m_ext.ext_free != NULL,
255 				("%s: ext_free not set", __func__));
256 			(*(m->m_ext.ext_free))(m->m_ext.ext_arg1,
257 			    m->m_ext.ext_arg2);
258 			break;
259 		default:
260 			KASSERT(m->m_ext.ext_type == 0,
261 				("%s: unknown ext_type", __func__));
262 		}
263 	}
264 	if (skipmbuf)
265 		return;
266 
267 	/*
268 	 * Free this mbuf back to the mbuf zone with all m_ext
269 	 * information purged.
270 	 */
271 	m->m_ext.ext_buf = NULL;
272 	m->m_ext.ext_free = NULL;
273 	m->m_ext.ext_arg1 = NULL;
274 	m->m_ext.ext_arg2 = NULL;
275 	m->m_ext.ref_cnt = NULL;
276 	m->m_ext.ext_size = 0;
277 	m->m_ext.ext_type = 0;
278 	m->m_flags &= ~M_EXT;
279 	uma_zfree(zone_mbuf, m);
280 }
281 
282 /*
283  * Attach the cluster from *m to *n, set up m_ext in *n
284  * and bump the refcount of the cluster.
285  */
286 static void
287 mb_dupcl(struct mbuf *n, struct mbuf *m)
288 {
289 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
290 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
291 	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
292 
293 	if (*(m->m_ext.ref_cnt) == 1)
294 		*(m->m_ext.ref_cnt) += 1;
295 	else
296 		atomic_add_int(m->m_ext.ref_cnt, 1);
297 	n->m_ext.ext_buf = m->m_ext.ext_buf;
298 	n->m_ext.ext_free = m->m_ext.ext_free;
299 	n->m_ext.ext_arg1 = m->m_ext.ext_arg1;
300 	n->m_ext.ext_arg2 = m->m_ext.ext_arg2;
301 	n->m_ext.ext_size = m->m_ext.ext_size;
302 	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
303 	n->m_ext.ext_type = m->m_ext.ext_type;
304 	n->m_flags |= M_EXT;
305 }
306 
307 /*
308  * Clean up mbuf (chain) from any tags and packet headers.
309  * If "all" is set then the first mbuf in the chain will be
310  * cleaned too.
311  */
312 void
313 m_demote(struct mbuf *m0, int all)
314 {
315 	struct mbuf *m;
316 
317 	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
318 		if (m->m_flags & M_PKTHDR) {
319 			m_tag_delete_chain(m, NULL);
320 			m->m_flags &= ~M_PKTHDR;
321 			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
322 		}
323 		if (m != m0 && m->m_nextpkt != NULL) {
324 			KASSERT(m->m_nextpkt == NULL,
325 			    ("%s: m_nextpkt not NULL", __func__));
326 			m_freem(m->m_nextpkt);
327 			m->m_nextpkt = NULL;
328 		}
329 		m->m_flags = m->m_flags & (M_EXT|M_RDONLY|M_FREELIST|M_NOFREE);
330 	}
331 }
332 
333 /*
334  * Sanity checks on mbuf (chain) for use in KASSERT() and general
335  * debugging.
336  * Returns 0 or panics when bad and 1 when all tests pass.
337  * sanitize: 0 to run M_SANITY_ACTION on a failed check, 1 to garble the
338  * offending fields so that problems blow up later.
339  */
340 int
341 m_sanity(struct mbuf *m0, int sanitize)
342 {
343 	struct mbuf *m;
344 	caddr_t a, b;
345 	int pktlen = 0;
346 
347 #ifdef INVARIANTS
348 #define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
349 #else
350 #define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
351 #endif
352 
353 	for (m = m0; m != NULL; m = m->m_next) {
354 		/*
355 		 * Basic pointer checks.  If any of these fails then some
356 		 * unrelated kernel memory before or after us is trashed.
357 		 * No way to recover from that.
358 		 */
359 		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
360 			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
361 			 (caddr_t)(&m->m_dat)) );
362 		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
363 			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
364 		if ((caddr_t)m->m_data < a)
365 			M_SANITY_ACTION("m_data outside mbuf data range left");
366 		if ((caddr_t)m->m_data > b)
367 			M_SANITY_ACTION("m_data outside mbuf data range right");
368 		if ((caddr_t)m->m_data + m->m_len > b)
369 			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
370 		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
371 			if ((caddr_t)m->m_pkthdr.header < a ||
372 			    (caddr_t)m->m_pkthdr.header > b)
373 				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
374 		}
375 
376 		/* m->m_nextpkt may only be set on first mbuf in chain. */
377 		if (m != m0 && m->m_nextpkt != NULL) {
378 			if (sanitize) {
379 				m_freem(m->m_nextpkt);
380 				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
381 			} else
382 				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
383 		}
384 
385 		/* packet length (not mbuf length!) calculation */
386 		if (m0->m_flags & M_PKTHDR)
387 			pktlen += m->m_len;
388 
389 		/* m_tags may only be attached to first mbuf in chain. */
390 		if (m != m0 && m->m_flags & M_PKTHDR &&
391 		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
392 			if (sanitize) {
393 				m_tag_delete_chain(m, NULL);
394 				/* put in 0xDEADC0DE perhaps? */
395 			} else
396 				M_SANITY_ACTION("m_tags on in-chain mbuf");
397 		}
398 
399 		/* M_PKTHDR may only be set on first mbuf in chain */
400 		if (m != m0 && m->m_flags & M_PKTHDR) {
401 			if (sanitize) {
402 				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
403 				m->m_flags &= ~M_PKTHDR;
404 				/* put in 0xDEADC0DE and leave hdr flag in */
405 			} else
406 				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
407 		}
408 	}
409 	m = m0;
410 	if (pktlen && pktlen != m->m_pkthdr.len) {
411 		if (sanitize)
412 			m->m_pkthdr.len = 0;
413 		else
414 			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
415 	}
416 	return 1;
417 
418 #undef	M_SANITY_ACTION
419 }
420 
421 
422 /*
423  * "Move" mbuf pkthdr from "from" to "to".
424  * "from" must have M_PKTHDR set, and "to" must be empty.
425  */
426 void
427 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
428 {
429 
430 #if 0
431 	/* see below for why these are not enabled */
432 	M_ASSERTPKTHDR(to);
433 	/* Note: with MAC, this may not be a good assertion. */
434 	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
435 	    ("m_move_pkthdr: to has tags"));
436 #endif
437 #ifdef MAC
438 	/*
439 	 * XXXMAC: It could be this should also occur for non-MAC?
440 	 */
441 	if (to->m_flags & M_PKTHDR)
442 		m_tag_delete_chain(to, NULL);
443 #endif
444 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
445 	if ((to->m_flags & M_EXT) == 0)
446 		to->m_data = to->m_pktdat;
447 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
448 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
449 	from->m_flags &= ~M_PKTHDR;
450 }
451 
452 /*
453  * Duplicate "from"'s mbuf pkthdr in "to".
454  * "from" must have M_PKTHDR set, and "to" must be empty.
455  * In particular, this does a deep copy of the packet tags.
456  */
457 int
458 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
459 {
460 
461 #if 0
462 	/*
463 	 * The mbuf allocator only initializes the pkthdr
464 	 * when the mbuf is allocated with MGETHDR. Many users
465 	 * (e.g. m_copy*, m_prepend) use MGET and then
466 	 * smash the pkthdr as needed causing these
467 	 * assertions to trip.  For now just disable them.
468 	 */
469 	M_ASSERTPKTHDR(to);
470 	/* Note: with MAC, this may not be a good assertion. */
471 	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
472 #endif
473 	MBUF_CHECKSLEEP(how);
474 #ifdef MAC
475 	if (to->m_flags & M_PKTHDR)
476 		m_tag_delete_chain(to, NULL);
477 #endif
478 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
479 	if ((to->m_flags & M_EXT) == 0)
480 		to->m_data = to->m_pktdat;
481 	to->m_pkthdr = from->m_pkthdr;
482 	SLIST_INIT(&to->m_pkthdr.tags);
483 	return (m_tag_copy_chain(to, from, MBTOM(how)));
484 }
485 
486 /*
487  * Lesser-used path for M_PREPEND:
488  * allocate new mbuf to prepend to chain,
489  * copy junk along.
490  */
491 struct mbuf *
492 m_prepend(struct mbuf *m, int len, int how)
493 {
494 	struct mbuf *mn;
495 
496 	if (m->m_flags & M_PKTHDR)
497 		MGETHDR(mn, how, m->m_type);
498 	else
499 		MGET(mn, how, m->m_type);
500 	if (mn == NULL) {
501 		m_freem(m);
502 		return (NULL);
503 	}
504 	if (m->m_flags & M_PKTHDR)
505 		M_MOVE_PKTHDR(mn, m);
506 	mn->m_next = m;
507 	m = mn;
508 	if (m->m_flags & M_PKTHDR) {
509 		if (len < MHLEN)
510 			MH_ALIGN(m, len);
511 	} else {
512 		if (len < MLEN)
513 			M_ALIGN(m, len);
514 	}
515 	m->m_len = len;
516 	return (m);
517 }
518 
519 /*
520  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
521  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
522  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
523  * Note that the copy is read-only, because clusters are not copied,
524  * only their reference counts are incremented.
525  */
526 struct mbuf *
527 m_copym(struct mbuf *m, int off0, int len, int wait)
528 {
529 	struct mbuf *n, **np;
530 	int off = off0;
531 	struct mbuf *top;
532 	int copyhdr = 0;
533 
534 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
535 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
536 	MBUF_CHECKSLEEP(wait);
537 	if (off == 0 && m->m_flags & M_PKTHDR)
538 		copyhdr = 1;
539 	while (off > 0) {
540 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
541 		if (off < m->m_len)
542 			break;
543 		off -= m->m_len;
544 		m = m->m_next;
545 	}
546 	np = &top;
547 	top = 0;
548 	while (len > 0) {
549 		if (m == NULL) {
550 			KASSERT(len == M_COPYALL,
551 			    ("m_copym, length > size of mbuf chain"));
552 			break;
553 		}
554 		if (copyhdr)
555 			MGETHDR(n, wait, m->m_type);
556 		else
557 			MGET(n, wait, m->m_type);
558 		*np = n;
559 		if (n == NULL)
560 			goto nospace;
561 		if (copyhdr) {
562 			if (!m_dup_pkthdr(n, m, wait))
563 				goto nospace;
564 			if (len == M_COPYALL)
565 				n->m_pkthdr.len -= off0;
566 			else
567 				n->m_pkthdr.len = len;
568 			copyhdr = 0;
569 		}
570 		n->m_len = min(len, m->m_len - off);
571 		if (m->m_flags & M_EXT) {
572 			n->m_data = m->m_data + off;
573 			mb_dupcl(n, m);
574 		} else
575 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
576 			    (u_int)n->m_len);
577 		if (len != M_COPYALL)
578 			len -= n->m_len;
579 		off = 0;
580 		m = m->m_next;
581 		np = &n->m_next;
582 	}
583 	if (top == NULL)
584 		mbstat.m_mcfail++;	/* XXX: No consistency. */
585 
586 	return (top);
587 nospace:
588 	m_freem(top);
589 	mbstat.m_mcfail++;	/* XXX: No consistency. */
590 	return (NULL);
591 }
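
/*
 * Illustrative usage sketch (not compiled): take a read-only,
 * reference-counted copy of a whole packet, e.g. to keep one copy while
 * handing the original to a consumer that will free it.
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */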
592 
593 /*
594  * Returns mbuf chain with new head for the prepending case.
595  * Copies from mbuf (chain) n from off for len to mbuf (chain) m
596  * either prepending or appending the data.
597  * The resulting mbuf (chain) m is fully writeable.
598  * m is destination (is made writeable)
599  * n is source, off is offset in source, len is len from offset
600  * prep, 0 append, 1 prepend
601  * how, wait or nowait
602  */
603 
604 static int
605 m_bcopyxxx(void *s, void *t, u_int len)
606 {
607 	bcopy(s, t, (size_t)len);
608 	return 0;
609 }
610 
611 struct mbuf *
612 m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
613     int prep, int how)
614 {
615 	struct mbuf *mm, *x, *z, *prev = NULL;
616 	caddr_t p;
617 	int i, nlen = 0;
618 	caddr_t buf[MLEN];
619 
620 	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
621 	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
622 	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
623 	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));
624 
625 	mm = m;
626 	if (!prep) {
627 		while(mm->m_next) {
628 			prev = mm;
629 			mm = mm->m_next;
630 		}
631 	}
632 	for (z = n; z != NULL; z = z->m_next)
633 		nlen += z->m_len;
634 	if (len == M_COPYALL)
635 		len = nlen - off;
636 	if (off + len > nlen || len < 1)
637 		return NULL;
638 
639 	if (!M_WRITABLE(mm)) {
640 		/* XXX: Use proper m_xxx function instead. */
641 		x = m_getcl(how, MT_DATA, mm->m_flags);
642 		if (x == NULL)
643 			return NULL;
644 		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
645 		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
646 		x->m_data = p;
647 		mm->m_next = NULL;
648 		if (mm != m)
649 			prev->m_next = x;
650 		m_free(mm);
651 		mm = x;
652 	}
653 
654 	/*
655 	 * Append/prepend the data, allocating mbufs as necessary.
656 	 */
657 	/* Shortcut if enough free space in first/last mbuf. */
658 	if (!prep && M_TRAILINGSPACE(mm) >= len) {
659 		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
660 			 mm->m_len);
661 		mm->m_len += len;
662 		mm->m_pkthdr.len += len;
663 		return m;
664 	}
665 	if (prep && M_LEADINGSPACE(mm) >= len) {
666 		mm->m_data = mtod(mm, caddr_t) - len;
667 		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
668 		mm->m_len += len;
669 		mm->m_pkthdr.len += len;
670 		return mm;
671 	}
672 
673 	/* Expand first/last mbuf to cluster if possible. */
674 	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
675 		bcopy(mm->m_data, &buf, mm->m_len);
676 		m_clget(mm, how);
677 		if (!(mm->m_flags & M_EXT))
678 			return NULL;
679 		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
680 		mm->m_data = mm->m_ext.ext_buf;
681 		mm->m_pkthdr.header = NULL;
682 	}
683 	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
684 		bcopy(mm->m_data, &buf, mm->m_len);
685 		m_clget(mm, how);
686 		if (!(mm->m_flags & M_EXT))
687 			return NULL;
688 		bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
689 		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
690 		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
691 			      mm->m_ext.ext_size - mm->m_len;
692 		mm->m_pkthdr.header = NULL;
693 	}
694 
695 	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
696 	if (!prep && len > M_TRAILINGSPACE(mm)) {
697 		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
698 			return NULL;
699 	}
700 	if (prep && len > M_LEADINGSPACE(mm)) {
701 		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
702 			return NULL;
703 		i = 0;
704 		for (x = z; x != NULL; x = x->m_next) {
705 			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
706 			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
707 			if (!x->m_next)
708 				break;
709 		}
710 		z->m_data += i - len;
711 		m_move_pkthdr(mm, z);
712 		x->m_next = mm;
713 		mm = z;
714 	}
715 
716 	/* Seek to start position in source mbuf. Optimization for long chains. */
717 	while (off > 0) {
718 		if (off < n->m_len)
719 			break;
720 		off -= n->m_len;
721 		n = n->m_next;
722 	}
723 
724 	/* Copy data into target mbuf. */
725 	z = mm;
726 	while (len > 0) {
727 		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
728 		i = M_TRAILINGSPACE(z);
729 		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
730 		z->m_len += i;
731 		/* fixup pkthdr.len if necessary */
732 		if ((prep ? mm : m)->m_flags & M_PKTHDR)
733 			(prep ? mm : m)->m_pkthdr.len += i;
734 		off += i;
735 		len -= i;
736 		z = z->m_next;
737 	}
738 	return (prep ? mm : m);
739 }
740 
741 /*
742  * Copy an entire packet, including header (which must be present).
743  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
744  * Note that the copy is read-only, because clusters are not copied,
745  * only their reference counts are incremented.
746  * Preserve alignment of the first mbuf so if the creator has left
747  * some room at the beginning (e.g. for inserting protocol headers)
748  * the copies still have the room available.
749  */
750 struct mbuf *
751 m_copypacket(struct mbuf *m, int how)
752 {
753 	struct mbuf *top, *n, *o;
754 
755 	MBUF_CHECKSLEEP(how);
756 	MGET(n, how, m->m_type);
757 	top = n;
758 	if (n == NULL)
759 		goto nospace;
760 
761 	if (!m_dup_pkthdr(n, m, how))
762 		goto nospace;
763 	n->m_len = m->m_len;
764 	if (m->m_flags & M_EXT) {
765 		n->m_data = m->m_data;
766 		mb_dupcl(n, m);
767 	} else {
768 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
769 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
770 	}
771 
772 	m = m->m_next;
773 	while (m) {
774 		MGET(o, how, m->m_type);
775 		if (o == NULL)
776 			goto nospace;
777 
778 		n->m_next = o;
779 		n = n->m_next;
780 
781 		n->m_len = m->m_len;
782 		if (m->m_flags & M_EXT) {
783 			n->m_data = m->m_data;
784 			mb_dupcl(n, m);
785 		} else {
786 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
787 		}
788 
789 		m = m->m_next;
790 	}
791 	return top;
792 nospace:
793 	m_freem(top);
794 	mbstat.m_mcfail++;	/* XXX: No consistency. */
795 	return (NULL);
796 }
797 
798 /*
799  * Copy data from an mbuf chain starting "off" bytes from the beginning,
800  * continuing for "len" bytes, into the indicated buffer.
801  */
802 void
803 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
804 {
805 	u_int count;
806 
807 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
808 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
809 	while (off > 0) {
810 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
811 		if (off < m->m_len)
812 			break;
813 		off -= m->m_len;
814 		m = m->m_next;
815 	}
816 	while (len > 0) {
817 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
818 		count = min(m->m_len - off, len);
819 		bcopy(mtod(m, caddr_t) + off, cp, count);
820 		len -= count;
821 		cp += count;
822 		off = 0;
823 		m = m->m_next;
824 	}
825 }
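
/*
 * Illustrative usage sketch (not compiled): copy the leading bytes of a
 * chain into a local structure without requiring them to be contiguous.
 * "struct foo_hdr" is an assumed example header type.
 *
 *	struct foo_hdr hdr;
 *
 *	if (m->m_pkthdr.len >= sizeof(hdr))
 *		m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
 */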
826 
827 /*
828  * Copy a packet header mbuf chain into a completely new chain, including
829  * copying any mbuf clusters.  Use this instead of m_copypacket() when
830  * you need a writable copy of an mbuf chain.
831  */
832 struct mbuf *
833 m_dup(struct mbuf *m, int how)
834 {
835 	struct mbuf **p, *top = NULL;
836 	int remain, moff, nsize;
837 
838 	MBUF_CHECKSLEEP(how);
839 	/* Sanity check */
840 	if (m == NULL)
841 		return (NULL);
842 	M_ASSERTPKTHDR(m);
843 
844 	/* While there's more data, get a new mbuf, tack it on, and fill it */
845 	remain = m->m_pkthdr.len;
846 	moff = 0;
847 	p = &top;
848 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
849 		struct mbuf *n;
850 
851 		/* Get the next new mbuf */
852 		if (remain >= MINCLSIZE) {
853 			n = m_getcl(how, m->m_type, 0);
854 			nsize = MCLBYTES;
855 		} else {
856 			n = m_get(how, m->m_type);
857 			nsize = MLEN;
858 		}
859 		if (n == NULL)
860 			goto nospace;
861 
862 		if (top == NULL) {		/* First one, must be PKTHDR */
863 			if (!m_dup_pkthdr(n, m, how)) {
864 				m_free(n);
865 				goto nospace;
866 			}
867 			if ((n->m_flags & M_EXT) == 0)
868 				nsize = MHLEN;
869 		}
870 		n->m_len = 0;
871 
872 		/* Link it into the new chain */
873 		*p = n;
874 		p = &n->m_next;
875 
876 		/* Copy data from original mbuf(s) into new mbuf */
877 		while (n->m_len < nsize && m != NULL) {
878 			int chunk = min(nsize - n->m_len, m->m_len - moff);
879 
880 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
881 			moff += chunk;
882 			n->m_len += chunk;
883 			remain -= chunk;
884 			if (moff == m->m_len) {
885 				m = m->m_next;
886 				moff = 0;
887 			}
888 		}
889 
890 		/* Check correct total mbuf length */
891 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
892 		    	("%s: bogus m_pkthdr.len", __func__));
893 	}
894 	return (top);
895 
896 nospace:
897 	m_freem(top);
898 	mbstat.m_mcfail++;	/* XXX: No consistency. */
899 	return (NULL);
900 }
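
/*
 * Illustrative usage sketch (not compiled): obtain a fully writable deep
 * copy of a packet, in contrast to m_copym() whose result may share
 * clusters with the original.
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, M_DONTWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 */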
901 
902 /*
903  * Concatenate mbuf chain n to m.
904  * Both chains must be of the same type (e.g. MT_DATA).
905  * Any m_pkthdr is not updated.
906  */
907 void
908 m_cat(struct mbuf *m, struct mbuf *n)
909 {
910 	while (m->m_next)
911 		m = m->m_next;
912 	while (n) {
913 		if (m->m_flags & M_EXT ||
914 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
915 			/* just join the two chains */
916 			m->m_next = n;
917 			return;
918 		}
919 		/* splat the data from one into the other */
920 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
921 		    (u_int)n->m_len);
922 		m->m_len += n->m_len;
923 		n = m_free(n);
924 	}
925 }
926 
927 void
928 m_adj(struct mbuf *mp, int req_len)
929 {
930 	int len = req_len;
931 	struct mbuf *m;
932 	int count;
933 
934 	if ((m = mp) == NULL)
935 		return;
936 	if (len >= 0) {
937 		/*
938 		 * Trim from head.
939 		 */
940 		while (m != NULL && len > 0) {
941 			if (m->m_len <= len) {
942 				len -= m->m_len;
943 				m->m_len = 0;
944 				m = m->m_next;
945 			} else {
946 				m->m_len -= len;
947 				m->m_data += len;
948 				len = 0;
949 			}
950 		}
951 		if (mp->m_flags & M_PKTHDR)
952 			mp->m_pkthdr.len -= (req_len - len);
953 	} else {
954 		/*
955 		 * Trim from tail.  Scan the mbuf chain,
956 		 * calculating its length and finding the last mbuf.
957 		 * If the adjustment only affects this mbuf, then just
958 		 * adjust and return.  Otherwise, rescan and truncate
959 		 * after the remaining size.
960 		 */
961 		len = -len;
962 		count = 0;
963 		for (;;) {
964 			count += m->m_len;
965 			if (m->m_next == (struct mbuf *)0)
966 				break;
967 			m = m->m_next;
968 		}
969 		if (m->m_len >= len) {
970 			m->m_len -= len;
971 			if (mp->m_flags & M_PKTHDR)
972 				mp->m_pkthdr.len -= len;
973 			return;
974 		}
975 		count -= len;
976 		if (count < 0)
977 			count = 0;
978 		/*
979 		 * Correct length for chain is "count".
980 		 * Find the mbuf with last data, adjust its length,
981 		 * and toss data from remaining mbufs on chain.
982 		 */
983 		m = mp;
984 		if (m->m_flags & M_PKTHDR)
985 			m->m_pkthdr.len = count;
986 		for (; m; m = m->m_next) {
987 			if (m->m_len >= count) {
988 				m->m_len = count;
989 				if (m->m_next != NULL) {
990 					m_freem(m->m_next);
991 					m->m_next = NULL;
992 				}
993 				break;
994 			}
995 			count -= m->m_len;
996 		}
997 	}
998 }
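
/*
 * Illustrative usage sketch (not compiled): a positive length trims from
 * the head of the chain, a negative one from the tail.  ETHER_HDR_LEN and
 * ETHER_CRC_LEN (from <net/ethernet.h>) are used only as familiar sizes.
 *
 *	m_adj(m, ETHER_HDR_LEN);
 *	m_adj(m, -ETHER_CRC_LEN);
 */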
999 
1000 /*
1001  * Rearrange an mbuf chain so that len bytes are contiguous
1002  * and in the data area of an mbuf (so that mtod and dtom
1003  * will work for a structure of size len).  Returns the resulting
1004  * mbuf chain on success, frees it and returns null on failure.
1005  * If there is room, it will add up to max_protohdr-len extra bytes to the
1006  * contiguous region in an attempt to avoid being called next time.
1007  */
1008 struct mbuf *
1009 m_pullup(struct mbuf *n, int len)
1010 {
1011 	struct mbuf *m;
1012 	int count;
1013 	int space;
1014 
1015 	/*
1016 	 * If first mbuf has no cluster, and has room for len bytes
1017 	 * without shifting current data, pullup into it,
1018 	 * otherwise allocate a new mbuf to prepend to the chain.
1019 	 */
1020 	if ((n->m_flags & M_EXT) == 0 &&
1021 	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1022 		if (n->m_len >= len)
1023 			return (n);
1024 		m = n;
1025 		n = n->m_next;
1026 		len -= m->m_len;
1027 	} else {
1028 		if (len > MHLEN)
1029 			goto bad;
1030 		MGET(m, M_DONTWAIT, n->m_type);
1031 		if (m == NULL)
1032 			goto bad;
1033 		m->m_len = 0;
1034 		if (n->m_flags & M_PKTHDR)
1035 			M_MOVE_PKTHDR(m, n);
1036 	}
1037 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1038 	do {
1039 		count = min(min(max(len, max_protohdr), space), n->m_len);
1040 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1041 		  (u_int)count);
1042 		len -= count;
1043 		m->m_len += count;
1044 		n->m_len -= count;
1045 		space -= count;
1046 		if (n->m_len)
1047 			n->m_data += count;
1048 		else
1049 			n = m_free(n);
1050 	} while (len > 0 && n);
1051 	if (len > 0) {
1052 		(void) m_free(m);
1053 		goto bad;
1054 	}
1055 	m->m_next = n;
1056 	return (m);
1057 bad:
1058 	m_freem(n);
1059 	mbstat.m_mpfail++;	/* XXX: No consistency. */
1060 	return (NULL);
1061 }
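
/*
 * Illustrative usage sketch (not compiled): the classic use is making a
 * protocol header contiguous before dereferencing it; "struct ip" (from
 * <netinet/ip.h>) is only an example.  Note that m_pullup() frees the
 * chain on failure, so the old pointer must not be reused.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */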
1062 
1063 /*
1064  * Like m_pullup(), except a new mbuf is always allocated, and we allow
1065  * the amount of empty space before the data in the new mbuf to be specified
1066  * (in the event that the caller expects to prepend later).
1067  */
1068 int MSFail;
1069 
1070 struct mbuf *
1071 m_copyup(struct mbuf *n, int len, int dstoff)
1072 {
1073 	struct mbuf *m;
1074 	int count, space;
1075 
1076 	if (len > (MHLEN - dstoff))
1077 		goto bad;
1078 	MGET(m, M_DONTWAIT, n->m_type);
1079 	if (m == NULL)
1080 		goto bad;
1081 	m->m_len = 0;
1082 	if (n->m_flags & M_PKTHDR)
1083 		M_MOVE_PKTHDR(m, n);
1084 	m->m_data += dstoff;
1085 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1086 	do {
1087 		count = min(min(max(len, max_protohdr), space), n->m_len);
1088 		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
1089 		    (unsigned)count);
1090 		len -= count;
1091 		m->m_len += count;
1092 		n->m_len -= count;
1093 		space -= count;
1094 		if (n->m_len)
1095 			n->m_data += count;
1096 		else
1097 			n = m_free(n);
1098 	} while (len > 0 && n);
1099 	if (len > 0) {
1100 		(void) m_free(m);
1101 		goto bad;
1102 	}
1103 	m->m_next = n;
1104 	return (m);
1105  bad:
1106 	m_freem(n);
1107 	MSFail++;
1108 	return (NULL);
1109 }
1110 
1111 /*
1112  * Partition an mbuf chain in two pieces, returning the tail --
1113  * all but the first len0 bytes.  In case of failure, it returns NULL and
1114  * attempts to restore the chain to its original state.
1115  *
1116  * Note that the resulting mbufs might be read-only, because the new
1117  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1118  * the "breaking point" happens to lie within a cluster mbuf. Use the
1119  * M_WRITABLE() macro to check for this case.
1120  */
1121 struct mbuf *
1122 m_split(struct mbuf *m0, int len0, int wait)
1123 {
1124 	struct mbuf *m, *n;
1125 	u_int len = len0, remain;
1126 
1127 	MBUF_CHECKSLEEP(wait);
1128 	for (m = m0; m && len > m->m_len; m = m->m_next)
1129 		len -= m->m_len;
1130 	if (m == NULL)
1131 		return (NULL);
1132 	remain = m->m_len - len;
1133 	if (m0->m_flags & M_PKTHDR) {
1134 		MGETHDR(n, wait, m0->m_type);
1135 		if (n == NULL)
1136 			return (NULL);
1137 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1138 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1139 		m0->m_pkthdr.len = len0;
1140 		if (m->m_flags & M_EXT)
1141 			goto extpacket;
1142 		if (remain > MHLEN) {
1143 			/* m can't be the lead packet */
1144 			MH_ALIGN(n, 0);
1145 			n->m_next = m_split(m, len, wait);
1146 			if (n->m_next == NULL) {
1147 				(void) m_free(n);
1148 				return (NULL);
1149 			} else {
1150 				n->m_len = 0;
1151 				return (n);
1152 			}
1153 		} else
1154 			MH_ALIGN(n, remain);
1155 	} else if (remain == 0) {
1156 		n = m->m_next;
1157 		m->m_next = NULL;
1158 		return (n);
1159 	} else {
1160 		MGET(n, wait, m->m_type);
1161 		if (n == NULL)
1162 			return (NULL);
1163 		M_ALIGN(n, remain);
1164 	}
1165 extpacket:
1166 	if (m->m_flags & M_EXT) {
1167 		n->m_data = m->m_data + len;
1168 		mb_dupcl(n, m);
1169 	} else {
1170 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1171 	}
1172 	n->m_len = remain;
1173 	m->m_len = len;
1174 	n->m_next = m->m_next;
1175 	m->m_next = NULL;
1176 	return (n);
1177 }
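
/*
 * Illustrative usage sketch (not compiled): split a packet after an
 * assumed "hdrlen" bytes, keeping the header portion in m0 and receiving
 * the payload as a new chain; on failure m0 is left intact.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, hdrlen, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */
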
1178 /*
1179  * Routine to copy from device local memory into mbufs.
1180  * Note that the `off' argument is the offset into the first mbuf of the
1181  * target chain at which to begin copying the data.
1182  */
1183 struct mbuf *
1184 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1185     void (*copy)(char *from, caddr_t to, u_int len))
1186 {
1187 	struct mbuf *m;
1188 	struct mbuf *top = NULL, **mp = &top;
1189 	int len;
1190 
1191 	if (off < 0 || off > MHLEN)
1192 		return (NULL);
1193 
1194 	while (totlen > 0) {
1195 		if (top == NULL) {	/* First one, must be PKTHDR */
1196 			if (totlen + off >= MINCLSIZE) {
1197 				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1198 				len = MCLBYTES;
1199 			} else {
1200 				m = m_gethdr(M_DONTWAIT, MT_DATA);
1201 				len = MHLEN;
1202 
1203 				/* Place initial small packet/header at end of mbuf */
1204 				if (m && totlen + off + max_linkhdr <= MLEN) {
1205 					m->m_data += max_linkhdr;
1206 					len -= max_linkhdr;
1207 				}
1208 			}
1209 			if (m == NULL)
1210 				return NULL;
1211 			m->m_pkthdr.rcvif = ifp;
1212 			m->m_pkthdr.len = totlen;
1213 		} else {
1214 			if (totlen + off >= MINCLSIZE) {
1215 				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
1216 				len = MCLBYTES;
1217 			} else {
1218 				m = m_get(M_DONTWAIT, MT_DATA);
1219 				len = MLEN;
1220 			}
1221 			if (m == NULL) {
1222 				m_freem(top);
1223 				return NULL;
1224 			}
1225 		}
1226 		if (off) {
1227 			m->m_data += off;
1228 			len -= off;
1229 			off = 0;
1230 		}
1231 		m->m_len = len = min(totlen, len);
1232 		if (copy)
1233 			copy(buf, mtod(m, caddr_t), (u_int)len);
1234 		else
1235 			bcopy(buf, mtod(m, caddr_t), (u_int)len);
1236 		buf += len;
1237 		*mp = m;
1238 		mp = &m->m_next;
1239 		totlen -= len;
1240 	}
1241 	return (top);
1242 }
1243 
1244 /*
1245  * Copy data from a buffer back into the indicated mbuf chain,
1246  * starting "off" bytes from the beginning, extending the mbuf
1247  * chain if necessary.
1248  */
1249 void
1250 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1251 {
1252 	int mlen;
1253 	struct mbuf *m = m0, *n;
1254 	int totlen = 0;
1255 
1256 	if (m0 == NULL)
1257 		return;
1258 	while (off > (mlen = m->m_len)) {
1259 		off -= mlen;
1260 		totlen += mlen;
1261 		if (m->m_next == NULL) {
1262 			n = m_get(M_DONTWAIT, m->m_type);
1263 			if (n == NULL)
1264 				goto out;
1265 			bzero(mtod(n, caddr_t), MLEN);
1266 			n->m_len = min(MLEN, len + off);
1267 			m->m_next = n;
1268 		}
1269 		m = m->m_next;
1270 	}
1271 	while (len > 0) {
1272 		if (m->m_next == NULL && (len > m->m_len - off)) {
1273 			m->m_len += min(len - (m->m_len - off),
1274 			    M_TRAILINGSPACE(m));
1275 		}
1276 		mlen = min (m->m_len - off, len);
1277 		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1278 		cp += mlen;
1279 		len -= mlen;
1280 		mlen += off;
1281 		off = 0;
1282 		totlen += mlen;
1283 		if (len == 0)
1284 			break;
1285 		if (m->m_next == NULL) {
1286 			n = m_get(M_DONTWAIT, m->m_type);
1287 			if (n == NULL)
1288 				break;
1289 			n->m_len = min(MLEN, len);
1290 			m->m_next = n;
1291 		}
1292 		m = m->m_next;
1293 	}
1294 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1295 		m->m_pkthdr.len = totlen;
1296 }
1297 
1298 /*
1299  * Append the specified data to the indicated mbuf chain.
1300  * Extend the mbuf chain if the new data does not fit in
1301  * existing space.
1302  *
1303  * Return 1 if able to complete the job; otherwise 0.
1304  */
1305 int
1306 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1307 {
1308 	struct mbuf *m, *n;
1309 	int remainder, space;
1310 
1311 	for (m = m0; m->m_next != NULL; m = m->m_next)
1312 		;
1313 	remainder = len;
1314 	space = M_TRAILINGSPACE(m);
1315 	if (space > 0) {
1316 		/*
1317 		 * Copy into available space.
1318 		 */
1319 		if (space > remainder)
1320 			space = remainder;
1321 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1322 		m->m_len += space;
1323 		cp += space, remainder -= space;
1324 	}
1325 	while (remainder > 0) {
1326 		/*
1327 		 * Allocate a new mbuf; could check space
1328 		 * and allocate a cluster instead.
1329 		 */
1330 		n = m_get(M_DONTWAIT, m->m_type);
1331 		if (n == NULL)
1332 			break;
1333 		n->m_len = min(MLEN, remainder);
1334 		bcopy(cp, mtod(n, caddr_t), n->m_len);
1335 		cp += n->m_len, remainder -= n->m_len;
1336 		m->m_next = n;
1337 		m = n;
1338 	}
1339 	if (m0->m_flags & M_PKTHDR)
1340 		m0->m_pkthdr.len += len - remainder;
1341 	return (remainder == 0);
1342 }
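
/*
 * Illustrative usage sketch (not compiled): append a small trailer
 * structure to a packet, letting the chain grow if there is no room.
 * "trailer" is an assumed local variable.
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer))
 *		return (ENOBUFS);
 */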
1343 
1344 /*
1345  * Apply function f to the data in an mbuf chain starting "off" bytes from
1346  * the beginning, continuing for "len" bytes.
1347  */
1348 int
1349 m_apply(struct mbuf *m, int off, int len,
1350     int (*f)(void *, void *, u_int), void *arg)
1351 {
1352 	u_int count;
1353 	int rval;
1354 
1355 	KASSERT(off >= 0, ("m_apply, negative off %d", off));
1356 	KASSERT(len >= 0, ("m_apply, negative len %d", len));
1357 	while (off > 0) {
1358 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1359 		if (off < m->m_len)
1360 			break;
1361 		off -= m->m_len;
1362 		m = m->m_next;
1363 	}
1364 	while (len > 0) {
1365 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1366 		count = min(m->m_len - off, len);
1367 		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1368 		if (rval)
1369 			return (rval);
1370 		len -= count;
1371 		off = 0;
1372 		m = m->m_next;
1373 	}
1374 	return (0);
1375 }
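
/*
 * Illustrative usage sketch (not compiled): m_apply() walks the data in
 * place without copying it.  The callback below is a trivial assumed
 * example that just totals the length of the regions it is handed.
 *
 *	static int
 *	count_bytes(void *arg, void *data, u_int len)
 *	{
 *		*(u_int *)arg += len;
 *		return (0);
 *	}
 *
 *	u_int total = 0;
 *
 *	(void)m_apply(m, 0, m->m_pkthdr.len, count_bytes, &total);
 */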
1376 
1377 /*
1378  * Return a pointer to mbuf/offset of location in mbuf chain.
1379  */
1380 struct mbuf *
1381 m_getptr(struct mbuf *m, int loc, int *off)
1382 {
1383 
1384 	while (loc >= 0) {
1385 		/* Normal end of search. */
1386 		if (m->m_len > loc) {
1387 			*off = loc;
1388 			return (m);
1389 		} else {
1390 			loc -= m->m_len;
1391 			if (m->m_next == NULL) {
1392 				if (loc == 0) {
1393 					/* Point at the end of valid data. */
1394 					*off = m->m_len;
1395 					return (m);
1396 				}
1397 				return (NULL);
1398 			}
1399 			m = m->m_next;
1400 		}
1401 	}
1402 	return (NULL);
1403 }
1404 
1405 void
1406 m_print(const struct mbuf *m, int maxlen)
1407 {
1408 	int len;
1409 	int pdata;
1410 	const struct mbuf *m2;
1411 
1412 	if (m->m_flags & M_PKTHDR)
1413 		len = m->m_pkthdr.len;
1414 	else
1415 		len = -1;
1416 	m2 = m;
1417 	while (m2 != NULL && (len == -1 || len)) {
1418 		pdata = m2->m_len;
1419 		if (maxlen != -1 && pdata > maxlen)
1420 			pdata = maxlen;
1421 		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1422 		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1423 		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1424 		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1425 		if (pdata)
1426 			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1427 		if (len != -1)
1428 			len -= m2->m_len;
1429 		m2 = m2->m_next;
1430 	}
1431 	if (len > 0)
1432 		printf("%d bytes unaccounted for.\n", len);
1433 	return;
1434 }
1435 
1436 u_int
1437 m_fixhdr(struct mbuf *m0)
1438 {
1439 	u_int len;
1440 
1441 	len = m_length(m0, NULL);
1442 	m0->m_pkthdr.len = len;
1443 	return (len);
1444 }
1445 
1446 u_int
1447 m_length(struct mbuf *m0, struct mbuf **last)
1448 {
1449 	struct mbuf *m;
1450 	u_int len;
1451 
1452 	len = 0;
1453 	for (m = m0; m != NULL; m = m->m_next) {
1454 		len += m->m_len;
1455 		if (m->m_next == NULL)
1456 			break;
1457 	}
1458 	if (last != NULL)
1459 		*last = m;
1460 	return (len);
1461 }
1462 
1463 /*
1464  * Defragment a mbuf chain, returning the shortest possible
1465  * chain of mbufs and clusters.  If allocation fails and
1466  * this cannot be completed, NULL will be returned, but
1467  * the passed in chain will be unchanged.  Upon success,
1468  * the original chain will be freed, and the new chain
1469  * will be returned.
1470  *
1471  * If a non-packet header is passed in, the original
1472  * mbuf (chain?) will be returned unharmed.
1473  */
1474 struct mbuf *
1475 m_defrag(struct mbuf *m0, int how)
1476 {
1477 	struct mbuf *m_new = NULL, *m_final = NULL;
1478 	int progress = 0, length;
1479 
1480 	MBUF_CHECKSLEEP(how);
1481 	if (!(m0->m_flags & M_PKTHDR))
1482 		return (m0);
1483 
1484 	m_fixhdr(m0); /* Needed sanity check */
1485 
1486 #ifdef MBUF_STRESS_TEST
1487 	if (m_defragrandomfailures) {
1488 		int temp = arc4random() & 0xff;
1489 		if (temp == 0xba)
1490 			goto nospace;
1491 	}
1492 #endif
1493 
1494 	if (m0->m_pkthdr.len > MHLEN)
1495 		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1496 	else
1497 		m_final = m_gethdr(how, MT_DATA);
1498 
1499 	if (m_final == NULL)
1500 		goto nospace;
1501 
1502 	if (m_dup_pkthdr(m_final, m0, how) == 0)
1503 		goto nospace;
1504 
1505 	m_new = m_final;
1506 
1507 	while (progress < m0->m_pkthdr.len) {
1508 		length = m0->m_pkthdr.len - progress;
1509 		if (length > MCLBYTES)
1510 			length = MCLBYTES;
1511 
1512 		if (m_new == NULL) {
1513 			if (length > MLEN)
1514 				m_new = m_getcl(how, MT_DATA, 0);
1515 			else
1516 				m_new = m_get(how, MT_DATA);
1517 			if (m_new == NULL)
1518 				goto nospace;
1519 		}
1520 
1521 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1522 		progress += length;
1523 		m_new->m_len = length;
1524 		if (m_new != m_final)
1525 			m_cat(m_final, m_new);
1526 		m_new = NULL;
1527 	}
1528 #ifdef MBUF_STRESS_TEST
1529 	if (m0->m_next == NULL)
1530 		m_defraguseless++;
1531 #endif
1532 	m_freem(m0);
1533 	m0 = m_final;
1534 #ifdef MBUF_STRESS_TEST
1535 	m_defragpackets++;
1536 	m_defragbytes += m0->m_pkthdr.len;
1537 #endif
1538 	return (m0);
1539 nospace:
1540 #ifdef MBUF_STRESS_TEST
1541 	m_defragfailure++;
1542 #endif
1543 	if (m_final)
1544 		m_freem(m_final);
1545 	return (NULL);
1546 }
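
/*
 * Illustrative usage sketch (not compiled): a driver whose DMA engine
 * handles only a few segments might compact a long chain before queueing
 * it; on failure the original chain is untouched and still owned by the
 * caller.
 *
 *	struct mbuf *d;
 *
 *	d = m_defrag(m, M_DONTWAIT);
 *	if (d == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = d;
 */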
1547 
1548 /*
1549  * Defragment an mbuf chain, returning at most maxfrags separate
1550  * mbufs+clusters.  If this is not possible NULL is returned and
1551  * the original mbuf chain is left in its present (potentially
1552  * modified) state.  We use two techniques: collapsing consecutive
1553  * mbufs and replacing consecutive mbufs by a cluster.
1554  *
1555  * NB: this should really be named m_defrag but that name is taken
1556  */
1557 struct mbuf *
1558 m_collapse(struct mbuf *m0, int how, int maxfrags)
1559 {
1560 	struct mbuf *m, *n, *n2, **prev;
1561 	u_int curfrags;
1562 
1563 	/*
1564 	 * Calculate the current number of frags.
1565 	 */
1566 	curfrags = 0;
1567 	for (m = m0; m != NULL; m = m->m_next)
1568 		curfrags++;
1569 	/*
1570 	 * First, try to collapse mbufs.  Note that we always collapse
1571 	 * towards the front so we don't need to deal with moving the
1572 	 * pkthdr.  This may be suboptimal if the first mbuf has much
1573 	 * less data than the following.
1574 	 */
1575 	m = m0;
1576 again:
1577 	for (;;) {
1578 		n = m->m_next;
1579 		if (n == NULL)
1580 			break;
1581 		if ((m->m_flags & M_RDONLY) == 0 &&
1582 		    n->m_len < M_TRAILINGSPACE(m)) {
1583 			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1584 				n->m_len);
1585 			m->m_len += n->m_len;
1586 			m->m_next = n->m_next;
1587 			m_free(n);
1588 			if (--curfrags <= maxfrags)
1589 				return m0;
1590 		} else
1591 			m = n;
1592 	}
1593 	KASSERT(maxfrags > 1,
1594 		("maxfrags %u, but normal collapse failed", maxfrags));
1595 	/*
1596 	 * Collapse consecutive mbufs to a cluster.
1597 	 */
1598 	prev = &m0->m_next;		/* NB: not the first mbuf */
1599 	while ((n = *prev) != NULL) {
1600 		if ((n2 = n->m_next) != NULL &&
1601 		    n->m_len + n2->m_len < MCLBYTES) {
1602 			m = m_getcl(how, MT_DATA, 0);
1603 			if (m == NULL)
1604 				goto bad;
1605 			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1606 			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1607 				n2->m_len);
1608 			m->m_len = n->m_len + n2->m_len;
1609 			m->m_next = n2->m_next;
1610 			*prev = m;
1611 			m_free(n);
1612 			m_free(n2);
1613 			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
1614 				return m0;
1615 			/*
1616 			 * Still not there, try the normal collapse
1617 			 * again before we allocate another cluster.
1618 			 */
1619 			goto again;
1620 		}
1621 		prev = &n->m_next;
1622 	}
1623 	/*
1624 	 * No place where we can collapse to a cluster; punt.
1625 	 * This can occur if, for example, you request 2 frags
1626 	 * but the packet requires that both be clusters (we
1627 	 * never reallocate the first mbuf to avoid moving the
1628 	 * packet header).
1629 	 */
1630 bad:
1631 	return NULL;
1632 }
1633 
1634 #ifdef MBUF_STRESS_TEST
1635 
1636 /*
1637  * Fragment an mbuf chain.  There's no reason you'd ever want to do
1638  * this in normal usage, but it's great for stress testing various
1639  * mbuf consumers.
1640  *
1641  * If fragmentation is not possible, the original chain will be
1642  * returned.
1643  *
1644  * Possible length values:
1645  * 0	 no fragmentation will occur
1646  * > 0	each fragment will be of the specified length
1647  * -1	each fragment will be the same random value in length
1648  * -2	each fragment's length will be entirely random
1649  * (Random values range from 1 to 256)
1650  */
1651 struct mbuf *
1652 m_fragment(struct mbuf *m0, int how, int length)
1653 {
1654 	struct mbuf *m_new = NULL, *m_final = NULL;
1655 	int progress = 0;
1656 
1657 	if (!(m0->m_flags & M_PKTHDR))
1658 		return (m0);
1659 
1660 	if ((length == 0) || (length < -2))
1661 		return (m0);
1662 
1663 	m_fixhdr(m0); /* Needed sanity check */
1664 
1665 	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1666 
1667 	if (m_final == NULL)
1668 		goto nospace;
1669 
1670 	if (m_dup_pkthdr(m_final, m0, how) == 0)
1671 		goto nospace;
1672 
1673 	m_new = m_final;
1674 
1675 	if (length == -1)
1676 		length = 1 + (arc4random() & 255);
1677 
1678 	while (progress < m0->m_pkthdr.len) {
1679 		int fraglen;
1680 
1681 		if (length > 0)
1682 			fraglen = length;
1683 		else
1684 			fraglen = 1 + (arc4random() & 255);
1685 		if (fraglen > m0->m_pkthdr.len - progress)
1686 			fraglen = m0->m_pkthdr.len - progress;
1687 
1688 		if (fraglen > MCLBYTES)
1689 			fraglen = MCLBYTES;
1690 
1691 		if (m_new == NULL) {
1692 			m_new = m_getcl(how, MT_DATA, 0);
1693 			if (m_new == NULL)
1694 				goto nospace;
1695 		}
1696 
1697 		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1698 		progress += fraglen;
1699 		m_new->m_len = fraglen;
1700 		if (m_new != m_final)
1701 			m_cat(m_final, m_new);
1702 		m_new = NULL;
1703 	}
1704 	m_freem(m0);
1705 	m0 = m_final;
1706 	return (m0);
1707 nospace:
1708 	if (m_final)
1709 		m_freem(m_final);
1710 	/* Return the original chain on failure */
1711 	return (m0);
1712 }
1713 
1714 #endif
1715 
1716 /*
1717  * Copy the contents of uio into a properly sized mbuf chain.
1718  */
1719 struct mbuf *
1720 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1721 {
1722 	struct mbuf *m, *mb;
1723 	int error, length, total;
1724 	int progress = 0;
1725 
1726 	/*
1727 	 * len can be zero or an arbitrary large value bound by
1728 	 * len can be zero or an arbitrarily large value bounded by
1729 	 */
1730 	if (len > 0)
1731 		total = min(uio->uio_resid, len);
1732 	else
1733 		total = uio->uio_resid;
1734 
1735 	/*
1736 	 * The smallest unit returned by m_getm2() is a single mbuf
1737 	 * with pkthdr.  We can't align past it.
1738 	 */
1739 	if (align >= MHLEN)
1740 		return (NULL);
1741 
1742 	/*
1743 	 * Give us the full allocation or nothing.
1744 	 * If len is zero return the smallest empty mbuf.
1745 	 */
1746 	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1747 	if (m == NULL)
1748 		return (NULL);
1749 	m->m_data += align;
1750 
1751 	/* Fill all mbufs with uio data and update header information. */
1752 	for (mb = m; mb != NULL; mb = mb->m_next) {
1753 		length = min(M_TRAILINGSPACE(mb), total - progress);
1754 
1755 		error = uiomove(mtod(mb, void *), length, uio);
1756 		if (error) {
1757 			m_freem(m);
1758 			return (NULL);
1759 		}
1760 
1761 		mb->m_len = length;
1762 		progress += length;
1763 		if (flags & M_PKTHDR)
1764 			m->m_pkthdr.len += length;
1765 	}
1766 	KASSERT(progress == total, ("%s: progress != total", __func__));
1767 
1768 	return (m);
1769 }
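
/*
 * Illustrative usage sketch (not compiled): turn the remainder of a uio
 * into a packet, leaving max_linkhdr bytes of leading space so headers
 * can be prepended later without another allocation.
 *
 *	struct mbuf *m;
 *
 *	m = m_uiotombuf(uio, M_DONTWAIT, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */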
1770 
1771 /*
1772  * Copy an mbuf chain into a uio limited by len if set.
1773  */
1774 int
1775 m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
1776 {
1777 	int error, length, total;
1778 	int progress = 0;
1779 
1780 	if (len > 0)
1781 		total = min(uio->uio_resid, len);
1782 	else
1783 		total = uio->uio_resid;
1784 
1785 	/* Fill the uio with data from the mbufs. */
1786 	for (; m != NULL; m = m->m_next) {
1787 		length = min(m->m_len, total - progress);
1788 
1789 		error = uiomove(mtod(m, void *), length, uio);
1790 		if (error)
1791 			return (error);
1792 
1793 		progress += length;
1794 	}
1795 
1796 	return (0);
1797 }
1798 
1799 /*
1800  * Set the m_data pointer of a newly-allocated mbuf
1801  * to place an object of the specified size at the
1802  * end of the mbuf, longword aligned.
1803  */
1804 void
1805 m_align(struct mbuf *m, int len)
1806 {
1807 	int adjust;
1808 
1809 	if (m->m_flags & M_EXT)
1810 		adjust = m->m_ext.ext_size - len;
1811 	else if (m->m_flags & M_PKTHDR)
1812 		adjust = MHLEN - len;
1813 	else
1814 		adjust = MLEN - len;
1815 	m->m_data += adjust &~ (sizeof(long)-1);
1816 }
1817 
1818 /*
1819  * Create a writable copy of the mbuf chain.  While doing this
1820  * we compact the chain with a goal of producing a chain with
1821  * at most two mbufs.  The second mbuf in this chain is likely
1822  * to be a cluster.  The primary purpose of this work is to create
1823  * a writable packet for encryption, compression, etc.  The
1824  * secondary goal is to linearize the data so the data can be
1825  * passed to crypto hardware in the most efficient manner possible.
1826  */
1827 struct mbuf *
1828 m_unshare(struct mbuf *m0, int how)
1829 {
1830 	struct mbuf *m, *mprev;
1831 	struct mbuf *n, *mfirst, *mlast;
1832 	int len, off;
1833 
1834 	mprev = NULL;
1835 	for (m = m0; m != NULL; m = mprev->m_next) {
1836 		/*
1837 		 * Regular mbufs are ignored unless there's a cluster
1838 		 * in front of it that we can use to coalesce.  We do
1839 		 * the latter mainly so later clusters can be coalesced
1840 		 * also w/o having to handle them specially (i.e. convert
1841 		 * mbuf+cluster -> cluster).  This optimization is heavily
1842 		 * influenced by the assumption that we're running over
1843 		 * Ethernet where MCLBYTES is large enough that the max
1844 		 * packet size will permit lots of coalescing into a
1845 		 * single cluster.  This in turn permits efficient
1846 		 * crypto operations, especially when using hardware.
1847 		 */
1848 		if ((m->m_flags & M_EXT) == 0) {
1849 			if (mprev && (mprev->m_flags & M_EXT) &&
1850 			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1851 				/* XXX: this ignores mbuf types */
1852 				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1853 				       mtod(m, caddr_t), m->m_len);
1854 				mprev->m_len += m->m_len;
1855 				mprev->m_next = m->m_next;	/* unlink from chain */
1856 				m_free(m);			/* reclaim mbuf */
1857 #if 0
1858 				newipsecstat.ips_mbcoalesced++;
1859 #endif
1860 			} else {
1861 				mprev = m;
1862 			}
1863 			continue;
1864 		}
1865 		/*
1866 		 * Writable mbufs are left alone (for now).
1867 		 */
1868 		if (M_WRITABLE(m)) {
1869 			mprev = m;
1870 			continue;
1871 		}
1872 
1873 		/*
1874 		 * Not writable, replace with a copy or coalesce with
1875 		 * the previous mbuf if possible (since we have to copy
1876 		 * it anyway, we try to reduce the number of mbufs and
1877 		 * clusters so that future work is easier).
1878 		 */
1879 		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1880 		/* NB: we only coalesce into a cluster or larger */
1881 		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1882 		    m->m_len <= M_TRAILINGSPACE(mprev)) {
1883 			/* XXX: this ignores mbuf types */
1884 			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1885 			       mtod(m, caddr_t), m->m_len);
1886 			mprev->m_len += m->m_len;
1887 			mprev->m_next = m->m_next;	/* unlink from chain */
1888 			m_free(m);			/* reclaim mbuf */
1889 #if 0
1890 			newipsecstat.ips_clcoalesced++;
1891 #endif
1892 			continue;
1893 		}
1894 
1895 		/*
1896 		 * Allocate new space to hold the copy...
1897 		 */
1898 		/* XXX why can M_PKTHDR be set past the first mbuf? */
1899 		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
1900 			/*
1901 			 * NB: if a packet header is present we must
1902 			 * allocate the mbuf separately from any cluster
1903 			 * because M_MOVE_PKTHDR will smash the data
1904 			 * pointer and drop the M_EXT marker.
1905 			 */
1906 			MGETHDR(n, how, m->m_type);
1907 			if (n == NULL) {
1908 				m_freem(m0);
1909 				return (NULL);
1910 			}
1911 			M_MOVE_PKTHDR(n, m);
1912 			MCLGET(n, how);
1913 			if ((n->m_flags & M_EXT) == 0) {
1914 				m_free(n);
1915 				m_freem(m0);
1916 				return (NULL);
1917 			}
1918 		} else {
1919 			n = m_getcl(how, m->m_type, m->m_flags);
1920 			if (n == NULL) {
1921 				m_freem(m0);
1922 				return (NULL);
1923 			}
1924 		}
1925 		/*
1926 		 * ... and copy the data.  We deal with jumbo mbufs
1927 		 * (i.e. m_len > MCLBYTES) by splitting them into
1928 		 * clusters.  We could just malloc a buffer and make
1929 		 * it external but too many device drivers don't know
1930 		 * how to break up the non-contiguous memory when
1931 		 * doing DMA.
1932 		 */
1933 		len = m->m_len;
1934 		off = 0;
1935 		mfirst = n;
1936 		mlast = NULL;
1937 		for (;;) {
1938 			int cc = min(len, MCLBYTES);
1939 			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1940 			n->m_len = cc;
1941 			if (mlast != NULL)
1942 				mlast->m_next = n;
1943 			mlast = n;
1944 #if 0
1945 			newipsecstat.ips_clcopied++;
1946 #endif
1947 
1948 			len -= cc;
1949 			if (len <= 0)
1950 				break;
1951 			off += cc;
1952 
1953 			n = m_getcl(how, m->m_type, m->m_flags);
1954 			if (n == NULL) {
1955 				m_freem(mfirst);
1956 				m_freem(m0);
1957 				return (NULL);
1958 			}
1959 		}
1960 		n->m_next = m->m_next;
1961 		if (mprev == NULL)
1962 			m0 = mfirst;		/* new head of chain */
1963 		else
1964 			mprev->m_next = mfirst;	/* replace old mbuf */
1965 		m_free(m);			/* release old mbuf */
1966 		mprev = mfirst;
1967 	}
1968 	return (m0);
1969 }
1970 
1971 #ifdef MBUF_PROFILING
1972 
1973 #define MP_BUCKETS 32 /* don't just change this as things may overflow. */
1974 struct mbufprofile {
1975 	uintmax_t wasted[MP_BUCKETS];
1976 	uintmax_t used[MP_BUCKETS];
1977 	uintmax_t segments[MP_BUCKETS];
1978 } mbprof;
1979 
1980 #define MP_MAXDIGITS 21	/* strlen("18446744073709551615") == 20, plus one spare */
1981 #define MP_NUMLINES 6
1982 #define MP_NUMSPERLINE 16
1983 #define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
1984 /* work out max space needed and add a bit of spare space too */
1985 #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
1986 #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)
1987 
1988 char mbprofbuf[MP_BUFSIZE];
1989 
1990 void
1991 m_profile(struct mbuf *m)
1992 {
1993 	int segments = 0;
1994 	int used = 0;
1995 	int wasted = 0;
1996 
1997 	while (m) {
1998 		segments++;
1999 		used += m->m_len;
2000 		if (m->m_flags & M_EXT) {
2001 			wasted += MHLEN - sizeof(m->m_ext) +
2002 			    m->m_ext.ext_size - m->m_len;
2003 		} else {
2004 			if (m->m_flags & M_PKTHDR)
2005 				wasted += MHLEN - m->m_len;
2006 			else
2007 				wasted += MLEN - m->m_len;
2008 		}
2009 		m = m->m_next;
2010 	}
2011 	/* be paranoid.. it helps */
2012 	if (segments > MP_BUCKETS - 1)
2013 		segments = MP_BUCKETS - 1;
2014 	if (used > 100000)
2015 		used = 100000;
2016 	if (wasted > 100000)
2017 		wasted = 100000;
2018 	/* store in the appropriate bucket */
2019 	/* don't bother locking. if it's slightly off, so what? */
2020 	mbprof.segments[segments]++;
2021 	mbprof.used[fls(used)]++;
2022 	mbprof.wasted[fls(wasted)]++;
2023 }
2024 
2025 static void
2026 mbprof_textify(void)
2027 {
2028 	int offset;
2029 	char *c;
2030 	u_int64_t *p;
2031 
2032 
2033 	p = &mbprof.wasted[0];
2034 	c = mbprofbuf;
2035 	offset = snprintf(c, MP_MAXLINE + 10,
2036 	    "wasted:\n"
2037 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2038 	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2039 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2040 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2041 #ifdef BIG_ARRAY
2042 	p = &mbprof.wasted[16];
2043 	c += offset;
2044 	offset = snprintf(c, MP_MAXLINE,
2045 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2046 	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2047 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2048 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2049 #endif
2050 	p = &mbprof.used[0];
2051 	c += offset;
2052 	offset = snprintf(c, MP_MAXLINE + 10,
2053 	    "used:\n"
2054 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2055 	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2056 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2057 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2058 #ifdef BIG_ARRAY
2059 	p = &mbprof.used[16];
2060 	c += offset;
2061 	offset = snprintf(c, MP_MAXLINE,
2062 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2063 	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2064 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2065 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2066 #endif
2067 	p = &mbprof.segments[0];
2068 	c += offset;
2069 	offset = snprintf(c, MP_MAXLINE + 10,
2070 	    "segments:\n"
2071 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2072 	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
2073 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2074 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2075 #ifdef BIG_ARRAY
2076 	p = &mbprof.segments[16];
2077 	c += offset;
2078 	offset = snprintf(c, MP_MAXLINE,
2079 	    "%ju %ju %ju %ju %ju %ju %ju %ju "
2080 	    "%ju %ju %ju %ju %ju %ju %ju %ju",
2081 	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
2082 	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
2083 #endif
2084 }
2085 
2086 static int
2087 mbprof_handler(SYSCTL_HANDLER_ARGS)
2088 {
2089 	int error;
2090 
2091 	mbprof_textify();
2092 	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
2093 	return (error);
2094 }
2095 
2096 static int
2097 mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
2098 {
2099 	int clear, error;
2100 
2101 	clear = 0;
2102 	error = sysctl_handle_int(oidp, &clear, 0, req);
2103 	if (error || !req->newptr)
2104 		return (error);
2105 
2106 	if (clear) {
2107 		bzero(&mbprof, sizeof(mbprof));
2108 	}
2109 
2110 	return (error);
2111 }
2112 
2113 
2114 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
2115 	    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");
2116 
2117 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
2118 	    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
2119 #endif
2120 
2121