xref: /dragonfly/sys/kern/uipc_mbuf.c (revision ce0e08e2)
1 /*
2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Jeffrey M. Hsu.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of The DragonFly Project nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific, prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1982, 1986, 1988, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
67  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
68  * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
69  */
70 
71 #include "opt_param.h"
72 #include "opt_ddb.h"
73 #include "opt_mbuf_stress_test.h"
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/kernel.h>
79 #include <sys/sysctl.h>
80 #include <sys/domain.h>
81 #include <sys/objcache.h>
82 #include <sys/tree.h>
83 #include <sys/protosw.h>
84 #include <sys/uio.h>
85 #include <sys/thread.h>
86 #include <sys/globaldata.h>
87 #include <sys/thread2.h>
88 
89 #include <machine/atomic.h>
90 
91 #include <vm/vm.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 
95 #ifdef INVARIANTS
96 #include <machine/cpu.h>
97 #endif
98 
99 /*
100  * mbuf cluster meta-data
101  */
102 struct mbcluster {
103 	int32_t	mcl_refs;
104 	void	*mcl_data;
105 };
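/*
 * mcl_refs counts the mbufs currently referencing this cluster and is
 * manipulated with atomic ops (see m_mclref()/m_mclfree() below).
 * mcl_data points at the MCLBYTES-sized buffer allocated by mclmeta_ctor().
 */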
106 
107 /*
108  * mbuf tracking for debugging purposes
109  */
110 #ifdef MBUF_DEBUG
111 
112 static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
113 
114 struct mbtrack;
115 RB_HEAD(mbuf_rb_tree, mbtrack);
116 RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
117 
118 struct mbtrack {
119 	RB_ENTRY(mbtrack) rb_node;
120 	int trackid;
121 	struct mbuf *m;
122 };
123 
124 static int
125 mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
126 {
127 	if (mb1->m < mb2->m)
128 		return(-1);
129 	if (mb1->m > mb2->m)
130 		return(1);
131 	return(0);
132 }
133 
134 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
135 
136 struct mbuf_rb_tree	mbuf_track_root;
137 
138 static void
139 mbuftrack(struct mbuf *m)
140 {
141 	struct mbtrack *mbt;
142 
143 	crit_enter();
144 	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
145 	mbt->m = m;
146 	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt))
147 		panic("mbuftrack: mbuf %p already being tracked\n", m);
148 	crit_exit();
149 }
150 
151 static void
152 mbufuntrack(struct mbuf *m)
153 {
154 	struct mbtrack *mbt;
155 
156 	crit_enter();
157 	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
158 	if (mbt == NULL) {
159 		kprintf("mbufuntrack: mbuf %p was not tracked\n", m);
160 	} else {
161 		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
162 		kfree(mbt, M_MTRACK);
163 	}
164 	crit_exit();
165 }
166 
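/*
 * Stamp every tracked mbuf in a list of packet chains with 'trackid',
 * walking both the m_nextpkt (packet) and m_next (buffer) linkages.
 */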
167 void
168 mbuftrackid(struct mbuf *m, int trackid)
169 {
170 	struct mbtrack *mbt;
171 	struct mbuf *n;
172 
173 	crit_enter();
174 	while (m) {
175 		n = m->m_nextpkt;
176 		while (m) {
177 			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
178 			if (mbt)
179 				mbt->trackid = trackid;
180 			m = m->m_next;
181 		}
182 		m = n;
183 	}
184 	crit_exit();
185 }
186 
187 static int
188 mbuftrack_callback(struct mbtrack *mbt, void *arg)
189 {
190 	struct sysctl_req *req = arg;
191 	char buf[64];
192 	int error;
193 
194 	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
195 
196 	error = SYSCTL_OUT(req, buf, strlen(buf));
197 	if (error)
198 		return(-error);
199 	return(0);
200 }
201 
202 static int
203 mbuftrack_show(SYSCTL_HANDLER_ARGS)
204 {
205 	int error;
206 
207 	crit_enter();
208 	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
209 				     mbuftrack_callback, req);
210 	crit_exit();
211 	return (-error);
212 }
213 SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
214 	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
215 
216 #else
217 
218 #define mbuftrack(m)
219 #define mbufuntrack(m)
220 
221 #endif
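/*
 * With MBUF_DEBUG disabled the tracking hooks above expand to nothing,
 * so the normal allocation and free paths pay no cost.
 */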
222 
223 static void mbinit(void *);
224 SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
225 
226 static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];
227 
228 static struct mbstat mbstat[SMP_MAXCPU];
229 int	max_linkhdr;
230 int	max_protohdr;
231 int	max_hdr;
232 int	max_datalen;
233 int	m_defragpackets;
234 int	m_defragbytes;
235 int	m_defraguseless;
236 int	m_defragfailure;
237 #ifdef MBUF_STRESS_TEST
238 int	m_defragrandomfailures;
239 #endif
240 
241 struct objcache *mbuf_cache, *mbufphdr_cache;
242 struct objcache *mclmeta_cache;
243 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
244 
245 int	nmbclusters;
246 int	nmbufs;
247 
248 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
249 	   &max_linkhdr, 0, "");
250 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
251 	   &max_protohdr, 0, "");
252 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
253 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
254 	   &max_datalen, 0, "");
257 static int do_mbstat(SYSCTL_HANDLER_ARGS);
258 
259 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
260 	0, 0, do_mbstat, "S,mbstat", "");
261 
262 static int do_mbtypes(SYSCTL_HANDLER_ARGS);
263 
264 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
265 	0, 0, do_mbtypes, "LU", "");
266 
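/*
 * Sysctl handler: fold the per-cpu mbstat structures into a single
 * snapshot.  The statistics are maintained per-cpu with non-locked
 * atomics so the allocation/free paths never contend; only this read
 * side pays for the aggregation.
 */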
267 static int
268 do_mbstat(SYSCTL_HANDLER_ARGS)
269 {
270 	struct mbstat mbstat_total;
271 	struct mbstat *mbstat_totalp;
272 	int i;
273 
274 	bzero(&mbstat_total, sizeof(mbstat_total));
275 	mbstat_totalp = &mbstat_total;
276 
277 	for (i = 0; i < ncpus; i++)
278 	{
279 		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
280 		mbstat_total.m_clusters += mbstat[i].m_clusters;
281 		mbstat_total.m_spare += mbstat[i].m_spare;
282 		mbstat_total.m_clfree += mbstat[i].m_clfree;
283 		mbstat_total.m_drops += mbstat[i].m_drops;
284 		mbstat_total.m_wait += mbstat[i].m_wait;
285 		mbstat_total.m_drain += mbstat[i].m_drain;
286 		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
287 		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
288 
289 	}
290 	/*
291 	 * The following fields are not cumulative fields so just
292 	 * get their values once.
293 	 */
294 	mbstat_total.m_msize = mbstat[0].m_msize;
295 	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
296 	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
297 	mbstat_total.m_mlen = mbstat[0].m_mlen;
298 	mbstat_total.m_mhlen = mbstat[0].m_mhlen;
299 
300 	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
301 }
302 
303 static int
304 do_mbtypes(SYSCTL_HANDLER_ARGS)
305 {
306 	u_long totals[MT_NTYPES];
307 	int i, j;
308 
309 	for (i = 0; i < MT_NTYPES; i++)
310 		totals[i] = 0;
311 
312 	for (i = 0; i < ncpus; i++)
313 	{
314 		for (j = 0; j < MT_NTYPES; j++)
315 			totals[j] += mbtypes[i][j];
316 	}
317 
318 	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
319 }
320 
321 /*
322  * These are read-only because we do not currently have any code
323  * to adjust the objcache limits after the fact.  The variables
324  * may only be set as boot-time tunables.
325  */
326 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
327 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
328 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
329 	   "Maximum number of mbufs available");
330 
331 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
332 	   &m_defragpackets, 0, "");
333 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
334 	   &m_defragbytes, 0, "");
335 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
336 	   &m_defraguseless, 0, "");
337 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
338 	   &m_defragfailure, 0, "");
339 #ifdef MBUF_STRESS_TEST
340 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
341 	   &m_defragrandomfailures, 0, "");
342 #endif
343 
344 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
345 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
346 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
347 
348 static void m_reclaim (void);
349 static void m_mclref(void *arg);
350 static void m_mclfree(void *arg);
351 
352 #ifndef NMBCLUSTERS
353 #define NMBCLUSTERS	(512 + maxusers * 16)
354 #endif
355 #ifndef NMBUFS
356 #define NMBUFS		(nmbclusters * 2)
357 #endif
358 
359 /*
360  * Perform sanity checks of tunables declared above.
361  */
362 static void
363 tunable_mbinit(void *dummy)
364 {
365 	/*
366 	 * This has to be done before VM init.
367 	 */
368 	nmbclusters = NMBCLUSTERS;
369 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
370 	nmbufs = NMBUFS;
371 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
372 	/* Sanity checks */
373 	if (nmbufs < nmbclusters * 2)
374 		nmbufs = nmbclusters * 2;
375 }
376 SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
377 	tunable_mbinit, NULL);
378 
379 /* "number of clusters of pages" */
380 #define NCL_INIT	1
381 
382 #define NMB_INIT	16
383 
384 /*
385  * The mbuf object cache only guarantees that m_next and m_nextpkt are
386  * NULL and that m_data points to the beginning of the data area.  In
387  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
388  * responsibility of the caller to initialize those fields before use.
389  */
390 
391 static boolean_t __inline
392 mbuf_ctor(void *obj, void *private, int ocflags)
393 {
394 	struct mbuf *m = obj;
395 
396 	m->m_next = NULL;
397 	m->m_nextpkt = NULL;
398 	m->m_data = m->m_dat;
399 	m->m_flags = 0;
400 
401 	return (TRUE);
402 }
403 
404 /*
405  * Initialize the mbuf and the packet header fields.
406  */
407 static boolean_t
408 mbufphdr_ctor(void *obj, void *private, int ocflags)
409 {
410 	struct mbuf *m = obj;
411 
412 	m->m_next = NULL;
413 	m->m_nextpkt = NULL;
414 	m->m_data = m->m_pktdat;
415 	m->m_flags = M_PKTHDR | M_PHCACHE;
416 
417 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
418 	SLIST_INIT(&m->m_pkthdr.tags);
419 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
420 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
421 
422 	return (TRUE);
423 }
424 
425 /*
426  * A mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
427  */
428 static boolean_t
429 mclmeta_ctor(void *obj, void *private, int ocflags)
430 {
431 	struct mbcluster *cl = obj;
432 	void *buf;
433 
434 	if (ocflags & M_NOWAIT)
435 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
436 	else
437 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
438 	if (buf == NULL)
439 		return (FALSE);
440 	cl->mcl_refs = 0;
441 	cl->mcl_data = buf;
442 	return (TRUE);
443 }
444 
445 static void
446 mclmeta_dtor(void *obj, void *private)
447 {
448 	struct mbcluster *mcl = obj;
449 
450 	KKASSERT(mcl->mcl_refs == 0);
451 	kfree(mcl->mcl_data, M_MBUFCL);
452 }
453 
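/*
 * Wire an mbcluster into an mbuf: point the m_ext descriptor at the
 * cluster, install the ref/free callbacks, take a cluster reference,
 * and flag the mbuf M_EXT | M_EXT_CLUSTER.
 */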
454 static void
455 linkcluster(struct mbuf *m, struct mbcluster *cl)
456 {
457 	/*
458 	 * Add the cluster to the mbuf.  The caller will detect that the
459 	 * mbuf now has an attached cluster.
460 	 */
461 	m->m_ext.ext_arg = cl;
462 	m->m_ext.ext_buf = cl->mcl_data;
463 	m->m_ext.ext_ref = m_mclref;
464 	m->m_ext.ext_free = m_mclfree;
465 	m->m_ext.ext_size = MCLBYTES;
466 	atomic_add_int(&cl->mcl_refs, 1);
467 
468 	m->m_data = m->m_ext.ext_buf;
469 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
470 }
471 
472 static boolean_t
473 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
474 {
475 	struct mbuf *m = obj;
476 	struct mbcluster *cl;
477 
478 	mbufphdr_ctor(obj, private, ocflags);
479 	cl = objcache_get(mclmeta_cache, ocflags);
480 	if (cl == NULL)
481 		return (FALSE);
482 	m->m_flags |= M_CLCACHE;
483 	linkcluster(m, cl);
484 	return (TRUE);
485 }
486 
487 static boolean_t
488 mbufcluster_ctor(void *obj, void *private, int ocflags)
489 {
490 	struct mbuf *m = obj;
491 	struct mbcluster *cl;
492 
493 	mbuf_ctor(obj, private, ocflags);
494 	cl = objcache_get(mclmeta_cache, ocflags);
495 	if (cl == NULL)
496 		return (FALSE);
497 	m->m_flags |= M_CLCACHE;
498 	linkcluster(m, cl);
499 	return (TRUE);
500 }
501 
502 /*
503  * Used for both the cluster and cluster PHDR caches.
504  *
505  * The mbuf may have lost its cluster due to sharing, deal
506  * with the situation by checking M_EXT.
507  */
508 static void
509 mbufcluster_dtor(void *obj, void *private)
510 {
511 	struct mbuf *m = obj;
512 	struct mbcluster *mcl;
513 
514 	if (m->m_flags & M_EXT) {
515 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
516 		mcl = m->m_ext.ext_arg;
517 		KKASSERT(mcl->mcl_refs == 1);
518 		mcl->mcl_refs = 0;
519 		objcache_put(mclmeta_cache, mcl);
520 	}
521 }
522 
523 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
524 struct objcache_malloc_args mclmeta_malloc_args =
525 	{ sizeof(struct mbcluster), M_MCLMETA };
526 
527 /* ARGSUSED*/
528 static void
529 mbinit(void *dummy)
530 {
531 	int mb_limit, cl_limit, mbcl_limit;
532 	int limit;
533 	int i;
534 
535 	/*
536 	 * Initialize statistics
537 	 */
538 	for (i = 0; i < ncpus; i++) {
539 		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
540 		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
541 		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
542 		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
543 		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
544 	}
545 
546 	/*
547 	 * Create object caches and save cluster limits, which will
548 	 * be used to adjust backing kmalloc pools' limit later.
549 	 */
550 
551 	mb_limit = cl_limit = mbcl_limit = 0;
552 
553 	limit = nmbufs;
554 	mbuf_cache = objcache_create("mbuf", &limit, 0,
555 	    mbuf_ctor, NULL, NULL,
556 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
557 	if (limit > mb_limit)
558 		mb_limit = limit;
559 
560 	limit = nmbufs;
561 	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
562 	    mbufphdr_ctor, NULL, NULL,
563 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
564 	if (limit > mb_limit)
565 		mb_limit = limit;
566 
567 	cl_limit = nmbclusters;
568 	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
569 	    mclmeta_ctor, mclmeta_dtor, NULL,
570 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
571 
572 	limit = nmbclusters;
573 	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
574 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
575 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
576 	if (limit > mbcl_limit)
577 		mbcl_limit = limit;
578 
579 	limit = nmbclusters;
580 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
581 	    &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
582 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
583 	if (limit > mbcl_limit)
584 		mbcl_limit = limit;
585 
586 	/*
587 	 * Adjust backing kmalloc pools' limit
588 	 *
589 	 * NOTE: We raise the limit by another 1/8 to take the effect
590 	 * of loosememuse into account.
591 	 */
592 	cl_limit += cl_limit / 8;
593 	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
594 			    mclmeta_malloc_args.objsize * cl_limit);
595 	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
596 
597 	mb_limit += mbcl_limit;
598 	mb_limit += mb_limit / 4; /* save some space for non-pkthdr mbufs */
599 	mb_limit += mb_limit / 8;
600 	kmalloc_raise_limit(mbuf_malloc_args.mtype,
601 			    mbuf_malloc_args.objsize * mb_limit);
602 }
603 
604 /*
605  * Return the number of references to this mbuf's data.  0 is returned
606  * if the mbuf is not M_EXT, a reference count is returned if it is
607  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
608  */
609 int
610 m_sharecount(struct mbuf *m)
611 {
612 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
613 	case 0:
614 		return (0);
615 	case M_EXT:
616 		return (99);
617 	case M_EXT | M_EXT_CLUSTER:
618 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
619 	}
620 	/* NOTREACHED */
621 	return (0);		/* to shut up compiler */
622 }
623 
624 /*
625  * change mbuf to new type
626  */
627 void
628 m_chtype(struct mbuf *m, int type)
629 {
630 	struct globaldata *gd = mycpu;
631 
632 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
633 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
634 	atomic_set_short_nonlocked(&m->m_type, type);
635 }
636 
637 static void
638 m_reclaim(void)
639 {
640 	struct domain *dp;
641 	struct protosw *pr;
642 
643 	crit_enter();
644 	SLIST_FOREACH(dp, &domains, dom_next) {
645 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
646 			if (pr->pr_drain)
647 				(*pr->pr_drain)();
648 		}
649 	}
650 	crit_exit();
651 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
652 }
653 
654 static void __inline
655 updatestats(struct mbuf *m, int type)
656 {
657 	struct globaldata *gd = mycpu;
658 	m->m_type = type;
659 
660 	mbuftrack(m);
661 
662 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
663 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
664 
665 }
666 
667 /*
668  * Allocate an mbuf.
669  */
670 struct mbuf *
671 m_get(int how, int type)
672 {
673 	struct mbuf *m;
674 	int ntries = 0;
675 	int ocf = MBTOM(how);
676 
677 retryonce:
678 
679 	m = objcache_get(mbuf_cache, ocf);
680 
681 	if (m == NULL) {
682 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
683 			struct objcache *reclaimlist[] = {
684 				mbufphdr_cache,
685 				mbufcluster_cache, mbufphdrcluster_cache
686 			};
687 			const int nreclaims = __arysize(reclaimlist);
688 
689 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
690 				m_reclaim();
691 			goto retryonce;
692 		}
693 		return (NULL);
694 	}
695 
696 	updatestats(m, type);
697 	return (m);
698 }
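/*
 * Example m_get() usage (illustrative only).  Note that, per the ctor
 * comments above, m_len is NOT initialized by the allocator:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 */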
699 
700 struct mbuf *
701 m_gethdr(int how, int type)
702 {
703 	struct mbuf *m;
704 	int ocf = MBTOM(how);
705 	int ntries = 0;
706 
707 retryonce:
708 
709 	m = objcache_get(mbufphdr_cache, ocf);
710 
711 	if (m == NULL) {
712 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
713 			struct objcache *reclaimlist[] = {
714 				mbuf_cache,
715 				mbufcluster_cache, mbufphdrcluster_cache
716 			};
717 			const int nreclaims = __arysize(reclaimlist);
718 
719 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
720 				m_reclaim();
721 			goto retryonce;
722 		}
723 		return (NULL);
724 	}
725 
726 	updatestats(m, type);
727 	return (m);
728 }
729 
730 /*
731  * Get a mbuf (not a mbuf cluster!) and zero it.
732  * Deprecated.
733  */
734 struct mbuf *
735 m_getclr(int how, int type)
736 {
737 	struct mbuf *m;
738 
739 	m = m_get(how, type);
740 	if (m != NULL)
741 		bzero(m->m_data, MLEN);
742 	return (m);
743 }
744 
745 /*
746  * Returns an mbuf with an attached cluster.
747  * Because many network drivers use this kind of buffer a lot, it is
748  * convenient to keep a small pool of free buffers of this kind.
749  * Even a small size such as 10 gives about 10% improvement in the
750  * forwarding rate in a bridge or router.
751  */
752 struct mbuf *
753 m_getcl(int how, short type, int flags)
754 {
755 	struct mbuf *m;
756 	int ocflags = MBTOM(how);
757 	int ntries = 0;
758 
759 retryonce:
760 
761 	if (flags & M_PKTHDR)
762 		m = objcache_get(mbufphdrcluster_cache, ocflags);
763 	else
764 		m = objcache_get(mbufcluster_cache, ocflags);
765 
766 	if (m == NULL) {
767 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
768 			struct objcache *reclaimlist[1];
769 
770 			if (flags & M_PKTHDR)
771 				reclaimlist[0] = mbufcluster_cache;
772 			else
773 				reclaimlist[0] = mbufphdrcluster_cache;
774 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
775 				m_reclaim();
776 			goto retryonce;
777 		}
778 		return (NULL);
779 	}
780 
781 	m->m_type = type;
782 
783 	mbuftrack(m);
784 
785 	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
786 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
787 	return (m);
788 }
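/*
 * The combined mbuf+cluster caches used above let the common driver
 * case (an mbuf with an attached 2K cluster) be satisfied by a single
 * objcache_get() instead of separate mbuf and cluster allocations.
 */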
789 
790 /*
791  * Allocate chain of requested length.
792  */
793 struct mbuf *
794 m_getc(int len, int how, int type)
795 {
796 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
797 	int nsize;
798 
799 	while (len > 0) {
800 		n = m_getl(len, how, type, 0, &nsize);
801 		if (n == NULL)
802 			goto failed;
803 		n->m_len = 0;
804 		*ntail = n;
805 		ntail = &n->m_next;
806 		len -= nsize;
807 	}
808 	return (nfirst);
809 
810 failed:
811 	m_freem(nfirst);
812 	return (NULL);
813 }
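/*
 * Note: m_getl() (see sys/mbuf.h) returns either a plain mbuf or an
 * mbuf with an attached cluster depending on the requested length, and
 * reports the usable size of the allocation via its last argument
 * ('nsize' above).
 */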
814 
815 /*
816  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
817  * and return a pointer to the head of the allocated chain. If m0 is
818  * non-null, then we assume that it is a single mbuf or an mbuf chain to
819  * which we want len bytes worth of mbufs and/or clusters attached, and so
820  * if we succeed in allocating it, we will just return a pointer to m0.
821  *
822  * If we happen to fail at any point during the allocation, we will free
823  * up everything we have already allocated and return NULL.
824  *
825  * Deprecated.  Use m_getc() and m_cat() instead.
826  */
827 struct mbuf *
828 m_getm(struct mbuf *m0, int len, int type, int how)
829 {
830 	struct mbuf *nfirst;
831 
832 	nfirst = m_getc(len, how, type);
833 
834 	if (m0 != NULL) {
835 		m_last(m0)->m_next = nfirst;
836 		return (m0);
837 	}
838 
839 	return (nfirst);
840 }
841 
842 /*
843  * Adds a cluster to a normal mbuf, M_EXT is set on success.
844  * Deprecated.  Use m_getcl() instead.
845  */
846 void
847 m_mclget(struct mbuf *m, int how)
848 {
849 	struct mbcluster *mcl;
850 
851 	KKASSERT((m->m_flags & M_EXT) == 0);
852 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
853 	if (mcl != NULL) {
854 		linkcluster(m, mcl);
855 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
856 	}
857 }
858 
859 /*
860  * Updates to mbcluster must be MPSAFE.  Only an entity which already has
861  * a reference to the cluster can ref it, so we are in no danger of
862  * racing an add with a subtract.  But the operation must still be atomic
863  * since multiple entities may have a reference on the cluster.
864  *
865  * m_mclfree() is almost the same but it must contend with two entities
866  * freeing the cluster at the same time.  If there is only one reference
867  * count we are the only entity referencing the cluster and no further
868  * locking is required.  Otherwise we must protect against a race to 0
869  * with the serializer.
870  */
871 static void
872 m_mclref(void *arg)
873 {
874 	struct mbcluster *mcl = arg;
875 
876 	atomic_add_int(&mcl->mcl_refs, 1);
877 }
878 
879 /*
880  * When dereferencing a cluster we have to deal with an N->0 race, where
881  * N entities free their references simultaneously.  atomic_fetchadd_int()
882  * returns the old value, so exactly one caller sees 1 and frees the cluster.
883  */
884 static void
885 m_mclfree(void *arg)
886 {
887 	struct mbcluster *mcl = arg;
888 
889 	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
890 		objcache_put(mclmeta_cache, mcl);
891 }
892 
893 extern void db_print_backtrace(void);
894 
895 /*
896  * Free a single mbuf and any associated external storage.  The successor,
897  * if any, is returned.
898  *
899  * We do need to check non-first mbufs for m_aux, since some existing
900  * code does not call M_PREPEND properly.
901  * (example: call to bpf_mtap from drivers)
902  */
903 struct mbuf *
904 m_free(struct mbuf *m)
905 {
906 	struct mbuf *n;
907 	struct globaldata *gd = mycpu;
908 
909 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
910 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
911 
912 	n = m->m_next;
913 
914 	/*
915 	 * Make sure the mbuf is in constructed state before returning it
916 	 * to the objcache.
917 	 */
918 	m->m_next = NULL;
919 	mbufuntrack(m);
920 #ifdef notyet
921 	KKASSERT(m->m_nextpkt == NULL);
922 #else
923 	if (m->m_nextpkt != NULL) {
924 #ifdef DDB
925 		static int afewtimes = 10;
926 
927 		if (afewtimes-- > 0) {
928 			kprintf("mfree: m->m_nextpkt != NULL\n");
929 			db_print_backtrace();
930 		}
931 #endif
932 		m->m_nextpkt = NULL;
933 	}
934 #endif
935 	if (m->m_flags & M_PKTHDR) {
936 		m_tag_delete_chain(m);		/* eliminate XXX JH */
937 	}
938 
939 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
940 
941 	/*
942 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
943 	 * cache.  This is based on the PHCACHE flag which tells us whether
944 	 * the mbuf was originally allocated out of a packet-header cache
945 	 * or a non-packet-header cache.
946 	 */
947 	if (m->m_flags & M_PHCACHE) {
948 		m->m_flags |= M_PKTHDR;
949 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
950 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
951 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
952 		SLIST_INIT(&m->m_pkthdr.tags);
953 	}
954 
955 	/*
956 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
957 	 * the mbuf was originally allocated from a cluster cache or not,
958 	 * and is totally separate from whether the mbuf is currently
959 	 * associated with a cluster.
960 	 */
961 	crit_enter();
962 	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
963 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
964 		/*
965 		 * mbuf+cluster cache case.  The mbuf was allocated from the
966 		 * combined mbuf_cluster cache and can be returned to the
967 		 * cache if the cluster hasn't been shared.
968 		 */
969 		if (m_sharecount(m) == 1) {
970 			/*
971 			 * The cluster has not been shared, we can just
972 			 * reset the data pointer and return the mbuf
973 			 * to the cluster cache.  Note that the reference
974 			 * count is left intact (it is still associated with
975 			 * an mbuf).
976 			 */
977 			m->m_data = m->m_ext.ext_buf;
978 			if (m->m_flags & M_PHCACHE)
979 				objcache_put(mbufphdrcluster_cache, m);
980 			else
981 				objcache_put(mbufcluster_cache, m);
982 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
983 		} else {
984 			/*
985 			 * Hell.  Someone else has a ref on this cluster,
986 			 * we have to disconnect it which means we can't
987 			 * put it back into the mbufcluster_cache, we
988 			 * have to destroy the mbuf.
989 			 *
990 			 * Other mbuf references to the cluster will typically
991 			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
992 			 *
993 			 * XXX we could try to connect another cluster to
994 			 * it.
995 			 */
996 			m->m_ext.ext_free(m->m_ext.ext_arg);
997 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
998 			if (m->m_flags & M_PHCACHE)
999 				objcache_dtor(mbufphdrcluster_cache, m);
1000 			else
1001 				objcache_dtor(mbufcluster_cache, m);
1002 		}
1003 		break;
1004 	case M_EXT | M_EXT_CLUSTER:
1005 		/*
1006 		 * Normal cluster associated with an mbuf that was allocated
1007 		 * from the normal mbuf pool rather than the cluster pool.
1008 		 * The cluster has to be independently disassociated from the
1009 		 * mbuf.
1010 		 */
1011 		if (m_sharecount(m) == 1)
1012 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1013 		/* fall through */
1014 	case M_EXT:
1015 		/*
1016 		 * Normal cluster association case, disconnect the cluster from
1017 		 * the mbuf.  The cluster may or may not be custom.
1018 		 */
1019 		m->m_ext.ext_free(m->m_ext.ext_arg);
1020 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1021 		/* fall through */
1022 	case 0:
1023 		/*
1024 		 * return the mbuf to the mbuf cache.
1025 		 */
1026 		if (m->m_flags & M_PHCACHE) {
1027 			m->m_data = m->m_pktdat;
1028 			objcache_put(mbufphdr_cache, m);
1029 		} else {
1030 			m->m_data = m->m_dat;
1031 			objcache_put(mbuf_cache, m);
1032 		}
1033 		atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
1034 		break;
1035 	default:
1036 		if (!panicstr)
1037 			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1038 		break;
1039 	}
1040 	crit_exit();
1041 	return (n);
1042 }
1043 
1044 void
1045 m_freem(struct mbuf *m)
1046 {
1047 	crit_enter();
1048 	while (m)
1049 		m = m_free(m);
1050 	crit_exit();
1051 }
1052 
1053 /*
1054  * mbuf utility routines
1055  */
1056 
1057 /*
1058  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1059  * copy junk along.
1060  */
1061 struct mbuf *
1062 m_prepend(struct mbuf *m, int len, int how)
1063 {
1064 	struct mbuf *mn;
1065 
1066 	if (m->m_flags & M_PKTHDR)
1067 	    mn = m_gethdr(how, m->m_type);
1068 	else
1069 	    mn = m_get(how, m->m_type);
1070 	if (mn == NULL) {
1071 		m_freem(m);
1072 		return (NULL);
1073 	}
1074 	if (m->m_flags & M_PKTHDR)
1075 		M_MOVE_PKTHDR(mn, m);
1076 	mn->m_next = m;
1077 	m = mn;
1078 	if (len < MHLEN)
1079 		MH_ALIGN(m, len);
1080 	m->m_len = len;
1081 	return (m);
1082 }
1083 
1084 /*
1085  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1086  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
1087  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
1088  * Note that the copy is read-only, because clusters are not copied,
1089  * only their reference counts are incremented.
1090  */
1091 struct mbuf *
1092 m_copym(const struct mbuf *m, int off0, int len, int wait)
1093 {
1094 	struct mbuf *n, **np;
1095 	int off = off0;
1096 	struct mbuf *top;
1097 	int copyhdr = 0;
1098 
1099 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
1100 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
1101 	if (off == 0 && m->m_flags & M_PKTHDR)
1102 		copyhdr = 1;
1103 	while (off > 0) {
1104 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1105 		if (off < m->m_len)
1106 			break;
1107 		off -= m->m_len;
1108 		m = m->m_next;
1109 	}
1110 	np = &top;
1111 	top = 0;
1112 	while (len > 0) {
1113 		if (m == NULL) {
1114 			KASSERT(len == M_COPYALL,
1115 			    ("m_copym, length > size of mbuf chain"));
1116 			break;
1117 		}
1118 		/*
1119 		 * Because we are sharing any cluster attachment below,
1120 		 * be sure to get an mbuf that does not have a cluster
1121 		 * associated with it.
1122 		 */
1123 		if (copyhdr)
1124 			n = m_gethdr(wait, m->m_type);
1125 		else
1126 			n = m_get(wait, m->m_type);
1127 		*np = n;
1128 		if (n == NULL)
1129 			goto nospace;
1130 		if (copyhdr) {
1131 			if (!m_dup_pkthdr(n, m, wait))
1132 				goto nospace;
1133 			if (len == M_COPYALL)
1134 				n->m_pkthdr.len -= off0;
1135 			else
1136 				n->m_pkthdr.len = len;
1137 			copyhdr = 0;
1138 		}
1139 		n->m_len = min(len, m->m_len - off);
1140 		if (m->m_flags & M_EXT) {
1141 			KKASSERT((n->m_flags & M_EXT) == 0);
1142 			n->m_data = m->m_data + off;
1143 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1144 			n->m_ext = m->m_ext;
1145 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1146 		} else {
1147 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1148 			    (unsigned)n->m_len);
1149 		}
1150 		if (len != M_COPYALL)
1151 			len -= n->m_len;
1152 		off = 0;
1153 		m = m->m_next;
1154 		np = &n->m_next;
1155 	}
1156 	if (top == NULL)
1157 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1158 	return (top);
1159 nospace:
1160 	m_freem(top);
1161 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1162 	return (NULL);
1163 }
1164 
1165 /*
1166  * Copy an entire packet, including header (which must be present).
1167  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1168  * Note that the copy is read-only, because clusters are not copied,
1169  * only their reference counts are incremented.
1170  * Preserve alignment of the first mbuf so if the creator has left
1171  * some room at the beginning (e.g. for inserting protocol headers)
1172  * the copies also have the room available.
1173  */
1174 struct mbuf *
1175 m_copypacket(struct mbuf *m, int how)
1176 {
1177 	struct mbuf *top, *n, *o;
1178 
1179 	n = m_gethdr(how, m->m_type);
1180 	top = n;
1181 	if (!n)
1182 		goto nospace;
1183 
1184 	if (!m_dup_pkthdr(n, m, how))
1185 		goto nospace;
1186 	n->m_len = m->m_len;
1187 	if (m->m_flags & M_EXT) {
1188 		KKASSERT((n->m_flags & M_EXT) == 0);
1189 		n->m_data = m->m_data;
1190 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1191 		n->m_ext = m->m_ext;
1192 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1193 	} else {
1194 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
1195 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1196 	}
1197 
1198 	m = m->m_next;
1199 	while (m) {
1200 		o = m_get(how, m->m_type);
1201 		if (!o)
1202 			goto nospace;
1203 
1204 		n->m_next = o;
1205 		n = n->m_next;
1206 
1207 		n->m_len = m->m_len;
1208 		if (m->m_flags & M_EXT) {
1209 			KKASSERT((n->m_flags & M_EXT) == 0);
1210 			n->m_data = m->m_data;
1211 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1212 			n->m_ext = m->m_ext;
1213 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1214 		} else {
1215 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1216 		}
1217 
1218 		m = m->m_next;
1219 	}
1220 	return top;
1221 nospace:
1222 	m_freem(top);
1223 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1224 	return (NULL);
1225 }
1226 
1227 /*
1228  * Copy data from an mbuf chain starting "off" bytes from the beginning,
1229  * continuing for "len" bytes, into the indicated buffer.
1230  */
1231 void
1232 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1233 {
1234 	unsigned count;
1235 
1236 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1237 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1238 	while (off > 0) {
1239 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1240 		if (off < m->m_len)
1241 			break;
1242 		off -= m->m_len;
1243 		m = m->m_next;
1244 	}
1245 	while (len > 0) {
1246 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1247 		count = min(m->m_len - off, len);
1248 		bcopy(mtod(m, caddr_t) + off, cp, count);
1249 		len -= count;
1250 		cp += count;
1251 		off = 0;
1252 		m = m->m_next;
1253 	}
1254 }
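/*
 * Example (illustrative): copy a fixed-size header out of a chain into
 * a local buffer, regardless of how the chain is fragmented:
 *
 *	char hdr[14];		-- e.g. an Ethernet header
 *
 *	if (m->m_pkthdr.len >= sizeof(hdr))
 *		m_copydata(m, 0, sizeof(hdr), hdr);
 */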
1255 
1256 /*
1257  * Copy a packet header mbuf chain into a completely new chain, including
1258  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1259  * you need a writable copy of an mbuf chain.
1260  */
1261 struct mbuf *
1262 m_dup(struct mbuf *m, int how)
1263 {
1264 	struct mbuf **p, *top = NULL;
1265 	int remain, moff, nsize;
1266 
1267 	/* Sanity check */
1268 	if (m == NULL)
1269 		return (NULL);
1270 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1271 
1272 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1273 	remain = m->m_pkthdr.len;
1274 	moff = 0;
1275 	p = &top;
1276 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1277 		struct mbuf *n;
1278 
1279 		/* Get the next new mbuf */
1280 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1281 			   &nsize);
1282 		if (n == NULL)
1283 			goto nospace;
1284 		if (top == NULL)
1285 			if (!m_dup_pkthdr(n, m, how))
1286 				goto nospace0;
1287 
1288 		/* Link it into the new chain */
1289 		*p = n;
1290 		p = &n->m_next;
1291 
1292 		/* Copy data from original mbuf(s) into new mbuf */
1293 		n->m_len = 0;
1294 		while (n->m_len < nsize && m != NULL) {
1295 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1296 
1297 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1298 			moff += chunk;
1299 			n->m_len += chunk;
1300 			remain -= chunk;
1301 			if (moff == m->m_len) {
1302 				m = m->m_next;
1303 				moff = 0;
1304 			}
1305 		}
1306 
1307 		/* Check correct total mbuf length */
1308 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1309 			("%s: bogus m_pkthdr.len", __func__));
1310 	}
1311 	return (top);
1312 
1313 nospace:
1314 	m_freem(top);
1315 nospace0:
1316 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1317 	return (NULL);
1318 }
1319 
1320 /*
1321  * Concatenate mbuf chain n to m.
1322  * Both chains must be of the same type (e.g. MT_DATA).
1323  * Any m_pkthdr is not updated.
1324  */
1325 void
1326 m_cat(struct mbuf *m, struct mbuf *n)
1327 {
1328 	m = m_last(m);
1329 	while (n) {
1330 		if (m->m_flags & M_EXT ||
1331 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1332 			/* just join the two chains */
1333 			m->m_next = n;
1334 			return;
1335 		}
1336 		/* splat the data from one into the other */
1337 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1338 		    (u_int)n->m_len);
1339 		m->m_len += n->m_len;
1340 		n = m_free(n);
1341 	}
1342 }
1343 
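/*
 * Trim 'req_len' bytes from the chain: from the head when req_len is
 * positive, from the tail when it is negative.  m_pkthdr.len is kept
 * in sync when the first mbuf has a packet header.
 */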
1344 void
1345 m_adj(struct mbuf *mp, int req_len)
1346 {
1347 	int len = req_len;
1348 	struct mbuf *m;
1349 	int count;
1350 
1351 	if ((m = mp) == NULL)
1352 		return;
1353 	if (len >= 0) {
1354 		/*
1355 		 * Trim from head.
1356 		 */
1357 		while (m != NULL && len > 0) {
1358 			if (m->m_len <= len) {
1359 				len -= m->m_len;
1360 				m->m_len = 0;
1361 				m = m->m_next;
1362 			} else {
1363 				m->m_len -= len;
1364 				m->m_data += len;
1365 				len = 0;
1366 			}
1367 		}
1368 		m = mp;
1369 		if (mp->m_flags & M_PKTHDR)
1370 			m->m_pkthdr.len -= (req_len - len);
1371 	} else {
1372 		/*
1373 		 * Trim from tail.  Scan the mbuf chain,
1374 		 * calculating its length and finding the last mbuf.
1375 		 * If the adjustment only affects this mbuf, then just
1376 		 * adjust and return.  Otherwise, rescan and truncate
1377 		 * after the remaining size.
1378 		 */
1379 		len = -len;
1380 		count = 0;
1381 		for (;;) {
1382 			count += m->m_len;
1383 			if (m->m_next == (struct mbuf *)0)
1384 				break;
1385 			m = m->m_next;
1386 		}
1387 		if (m->m_len >= len) {
1388 			m->m_len -= len;
1389 			if (mp->m_flags & M_PKTHDR)
1390 				mp->m_pkthdr.len -= len;
1391 			return;
1392 		}
1393 		count -= len;
1394 		if (count < 0)
1395 			count = 0;
1396 		/*
1397 		 * Correct length for chain is "count".
1398 		 * Find the mbuf with last data, adjust its length,
1399 		 * and toss data from remaining mbufs on chain.
1400 		 */
1401 		m = mp;
1402 		if (m->m_flags & M_PKTHDR)
1403 			m->m_pkthdr.len = count;
1404 		for (; m; m = m->m_next) {
1405 			if (m->m_len >= count) {
1406 				m->m_len = count;
1407 				break;
1408 			}
1409 			count -= m->m_len;
1410 		}
1411 		while (m->m_next)
1412 			(m = m->m_next)->m_len = 0;
1413 	}
1414 }
1415 
1416 /*
1417  * Rearrange an mbuf chain so that len bytes are contiguous
1418  * and in the data area of an mbuf (so that mtod will work for a structure
1419  * of size len).  Returns the resulting mbuf chain on success, frees it and
1420  * returns null on failure.  If there is room, it will add up to
1421  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1422  * avoid being called next time.
1423  */
1424 struct mbuf *
1425 m_pullup(struct mbuf *n, int len)
1426 {
1427 	struct mbuf *m;
1428 	int count;
1429 	int space;
1430 
1431 	/*
1432 	 * If first mbuf has no cluster, and has room for len bytes
1433 	 * without shifting current data, pullup into it,
1434 	 * otherwise allocate a new mbuf to prepend to the chain.
1435 	 */
1436 	if (!(n->m_flags & M_EXT) &&
1437 	    n->m_data + len < &n->m_dat[MLEN] &&
1438 	    n->m_next) {
1439 		if (n->m_len >= len)
1440 			return (n);
1441 		m = n;
1442 		n = n->m_next;
1443 		len -= m->m_len;
1444 	} else {
1445 		if (len > MHLEN)
1446 			goto bad;
1447 		if (n->m_flags & M_PKTHDR)
1448 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1449 		else
1450 			m = m_get(MB_DONTWAIT, n->m_type);
1451 		if (m == NULL)
1452 			goto bad;
1453 		m->m_len = 0;
1454 		if (n->m_flags & M_PKTHDR)
1455 			M_MOVE_PKTHDR(m, n);
1456 	}
1457 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1458 	do {
1459 		count = min(min(max(len, max_protohdr), space), n->m_len);
1460 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1461 		  (unsigned)count);
1462 		len -= count;
1463 		m->m_len += count;
1464 		n->m_len -= count;
1465 		space -= count;
1466 		if (n->m_len)
1467 			n->m_data += count;
1468 		else
1469 			n = m_free(n);
1470 	} while (len > 0 && n);
1471 	if (len > 0) {
1472 		m_free(m);
1473 		goto bad;
1474 	}
1475 	m->m_next = n;
1476 	return (m);
1477 bad:
1478 	m_freem(n);
1479 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1480 	return (NULL);
1481 }
1482 
1483 /*
1484  * Partition an mbuf chain in two pieces, returning the tail --
1485  * all but the first len0 bytes.  In case of failure, it returns NULL and
1486  * attempts to restore the chain to its original state.
1487  *
1488  * Note that the resulting mbufs might be read-only, because the new
1489  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1490  * the "breaking point" happens to lie within a cluster mbuf. Use the
1491  * M_WRITABLE() macro to check for this case.
1492  */
1493 struct mbuf *
1494 m_split(struct mbuf *m0, int len0, int wait)
1495 {
1496 	struct mbuf *m, *n;
1497 	unsigned len = len0, remain;
1498 
1499 	for (m = m0; m && len > m->m_len; m = m->m_next)
1500 		len -= m->m_len;
1501 	if (m == NULL)
1502 		return (NULL);
1503 	remain = m->m_len - len;
1504 	if (m0->m_flags & M_PKTHDR) {
1505 		n = m_gethdr(wait, m0->m_type);
1506 		if (n == NULL)
1507 			return (NULL);
1508 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1509 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1510 		m0->m_pkthdr.len = len0;
1511 		if (m->m_flags & M_EXT)
1512 			goto extpacket;
1513 		if (remain > MHLEN) {
1514 			/* m can't be the lead packet */
1515 			MH_ALIGN(n, 0);
1516 			n->m_next = m_split(m, len, wait);
1517 			if (n->m_next == NULL) {
1518 				m_free(n);
1519 				return (NULL);
1520 			} else {
1521 				n->m_len = 0;
1522 				return (n);
1523 			}
1524 		} else
1525 			MH_ALIGN(n, remain);
1526 	} else if (remain == 0) {
1527 		n = m->m_next;
1528 		m->m_next = 0;
1529 		return (n);
1530 	} else {
1531 		n = m_get(wait, m->m_type);
1532 		if (n == NULL)
1533 			return (NULL);
1534 		M_ALIGN(n, remain);
1535 	}
1536 extpacket:
1537 	if (m->m_flags & M_EXT) {
1538 		KKASSERT((n->m_flags & M_EXT) == 0);
1539 		n->m_data = m->m_data + len;
1540 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1541 		n->m_ext = m->m_ext;
1542 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1543 	} else {
1544 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1545 	}
1546 	n->m_len = remain;
1547 	m->m_len = len;
1548 	n->m_next = m->m_next;
1549 	m->m_next = 0;
1550 	return (n);
1551 }
1552 
1553 /*
1554  * Routine to copy from device local memory into mbufs.
1555  * Note: "offset" is ill-defined and always called as 0, so ignore it.
1556  */
1557 struct mbuf *
1558 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1559     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1560 {
1561 	struct mbuf *m, *mfirst = NULL, **mtail;
1562 	int nsize, flags;
1563 
1564 	if (copy == NULL)
1565 		copy = bcopy;
1566 	mtail = &mfirst;
1567 	flags = M_PKTHDR;
1568 
1569 	while (len > 0) {
1570 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1571 		if (m == NULL) {
1572 			m_freem(mfirst);
1573 			return (NULL);
1574 		}
1575 		m->m_len = min(len, nsize);
1576 
1577 		if (flags & M_PKTHDR) {
1578 			if (len + max_linkhdr <= nsize)
1579 				m->m_data += max_linkhdr;
1580 			m->m_pkthdr.rcvif = ifp;
1581 			m->m_pkthdr.len = len;
1582 			flags = 0;
1583 		}
1584 
1585 		copy(buf, m->m_data, (unsigned)m->m_len);
1586 		buf += m->m_len;
1587 		len -= m->m_len;
1588 		*mtail = m;
1589 		mtail = &m->m_next;
1590 	}
1591 
1592 	return (mfirst);
1593 }
1594 
1595 /*
1596  * Routine to zero-pad an mbuf chain to the specified length 'padto'.
1597  */
1598 int
1599 m_devpad(struct mbuf *m, int padto)
1600 {
1601 	struct mbuf *last = NULL;
1602 	int padlen;
1603 
1604 	if (padto <= m->m_pkthdr.len)
1605 		return 0;
1606 
1607 	padlen = padto - m->m_pkthdr.len;
1608 
1609 	/* if there's only the packet-header and we can pad there, use it. */
1610 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1611 		last = m;
1612 	} else {
1613 		/*
1614 		 * Walk packet chain to find last mbuf. We will either
1615 		 * pad there, or append a new mbuf and pad it
1616 		 */
1617 		for (last = m; last->m_next != NULL; last = last->m_next)
1618 			; /* EMPTY */
1619 
1620 		/* `last' now points to last in chain. */
1621 		if (M_TRAILINGSPACE(last) < padlen) {
1622 			struct mbuf *n;
1623 
1624 			/* Allocate new empty mbuf, pad it.  Compact later. */
1625 			MGET(n, MB_DONTWAIT, MT_DATA);
1626 			if (n == NULL)
1627 				return ENOBUFS;
1628 			n->m_len = 0;
1629 			last->m_next = n;
1630 			last = n;
1631 		}
1632 	}
1633 	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1634 	KKASSERT(M_WRITABLE(last));
1635 
1636 	/* Now zero the pad area */
1637 	bzero(mtod(last, char *) + last->m_len, padlen);
1638 	last->m_len += padlen;
1639 	m->m_pkthdr.len += padlen;
1640 	return 0;
1641 }
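/*
 * m_devpad() returns 0 on success or ENOBUFS if a pad mbuf could not be
 * allocated.  It is the sort of helper NIC drivers typically use to
 * zero-pad runt frames (e.g. Ethernet frames shorter than the 60-byte
 * minimum) before transmission.
 */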
1642 
1643 /*
1644  * Copy data from a buffer back into the indicated mbuf chain,
1645  * starting "off" bytes from the beginning, extending the mbuf
1646  * chain if necessary.
1647  */
1648 void
1649 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1650 {
1651 	int mlen;
1652 	struct mbuf *m = m0, *n;
1653 	int totlen = 0;
1654 
1655 	if (m0 == NULL)
1656 		return;
1657 	while (off > (mlen = m->m_len)) {
1658 		off -= mlen;
1659 		totlen += mlen;
1660 		if (m->m_next == NULL) {
1661 			n = m_getclr(MB_DONTWAIT, m->m_type);
1662 			if (n == NULL)
1663 				goto out;
1664 			n->m_len = min(MLEN, len + off);
1665 			m->m_next = n;
1666 		}
1667 		m = m->m_next;
1668 	}
1669 	while (len > 0) {
1670 		mlen = min(m->m_len - off, len);
1671 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1672 		cp += mlen;
1673 		len -= mlen;
1674 		mlen += off;
1675 		off = 0;
1676 		totlen += mlen;
1677 		if (len == 0)
1678 			break;
1679 		if (m->m_next == NULL) {
1680 			n = m_get(MB_DONTWAIT, m->m_type);
1681 			if (n == NULL)
1682 				break;
1683 			n->m_len = min(MLEN, len);
1684 			m->m_next = n;
1685 		}
1686 		m = m->m_next;
1687 	}
1688 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1689 		m->m_pkthdr.len = totlen;
1690 }
1691 
1692 void
1693 m_print(const struct mbuf *m)
1694 {
1695 	int len;
1696 	const struct mbuf *m2;
1697 
1698 	len = m->m_pkthdr.len;
1699 	m2 = m;
1700 	while (len) {
1701 		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1702 		len -= m2->m_len;
1703 		m2 = m2->m_next;
1704 	}
1705 	return;
1706 }
1707 
1708 /*
1709  * "Move" mbuf pkthdr from "from" to "to".
1710  * "from" must have M_PKTHDR set, and "to" must be empty.
1711  */
1712 void
1713 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1714 {
1715 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1716 
1717 	to->m_flags |= from->m_flags & M_COPYFLAGS;
1718 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
1719 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
1720 }
1721 
1722 /*
1723  * Duplicate "from"'s mbuf pkthdr in "to".
1724  * "from" must have M_PKTHDR set, and "to" must be empty.
1725  * In particular, this does a deep copy of the packet tags.
1726  */
1727 int
1728 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1729 {
1730 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1731 
1732 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
1733 		      (to->m_flags & ~M_COPYFLAGS);
1734 	to->m_pkthdr = from->m_pkthdr;
1735 	SLIST_INIT(&to->m_pkthdr.tags);
1736 	return (m_tag_copy_chain(to, from, how));
1737 }
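/*
 * Note the asymmetry between the two routines above: m_move_pkthdr()
 * transfers the tag list and cannot fail, while m_dup_pkthdr() deep
 * copies the tags via m_tag_copy_chain() and so can fail under memory
 * pressure.
 */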
1738 
1739 /*
1740  * Defragment a mbuf chain, returning the shortest possible
1741  * chain of mbufs and clusters.  If allocation fails and
1742  * this cannot be completed, NULL will be returned, but
1743  * the passed in chain will be unchanged.  Upon success,
1744  * the original chain will be freed, and the new chain
1745  * will be returned.
1746  *
1747  * If a non-packet header is passed in, the original
1748  * mbuf (chain?) will be returned unharmed.
1749  *
1750  * m_defrag_nofree doesn't free the passed in mbuf.
1751  */
1752 struct mbuf *
1753 m_defrag(struct mbuf *m0, int how)
1754 {
1755 	struct mbuf *m_new;
1756 
1757 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
1758 		return (NULL);
1759 	if (m_new != m0)
1760 		m_freem(m0);
1761 	return (m_new);
1762 }
1763 
1764 struct mbuf *
1765 m_defrag_nofree(struct mbuf *m0, int how)
1766 {
1767 	struct mbuf	*m_new = NULL, *m_final = NULL;
1768 	int		progress = 0, length, nsize;
1769 
1770 	if (!(m0->m_flags & M_PKTHDR))
1771 		return (m0);
1772 
1773 #ifdef MBUF_STRESS_TEST
1774 	if (m_defragrandomfailures) {
1775 		int temp = karc4random() & 0xff;
1776 		if (temp == 0xba)
1777 			goto nospace;
1778 	}
1779 #endif
1780 
1781 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
1782 	if (m_final == NULL)
1783 		goto nospace;
1784 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
1785 
1786 	if (m_dup_pkthdr(m_final, m0, how) == 0)
1787 		goto nospace;
1788 
1789 	m_new = m_final;
1790 
1791 	while (progress < m0->m_pkthdr.len) {
1792 		length = m0->m_pkthdr.len - progress;
1793 		if (length > MCLBYTES)
1794 			length = MCLBYTES;
1795 
1796 		if (m_new == NULL) {
1797 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
1798 			if (m_new == NULL)
1799 				goto nospace;
1800 		}
1801 
1802 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1803 		progress += length;
1804 		m_new->m_len = length;
1805 		if (m_new != m_final)
1806 			m_cat(m_final, m_new);
1807 		m_new = NULL;
1808 	}
1809 	if (m0->m_next == NULL)
1810 		m_defraguseless++;
1811 	m_defragpackets++;
1812 	m_defragbytes += m_final->m_pkthdr.len;
1813 	return (m_final);
1814 nospace:
1815 	m_defragfailure++;
1816 	if (m_new)
1817 		m_free(m_new);
1818 	m_freem(m_final);
1819 	return (NULL);
1820 }
1821 
1822 /*
1823  * Move data from uio into mbufs.
1824  */
1825 struct mbuf *
1826 m_uiomove(struct uio *uio)
1827 {
1828 	struct mbuf *m;			/* current working mbuf */
1829 	struct mbuf *head = NULL;	/* result mbuf chain */
1830 	struct mbuf **mp = &head;
1831 	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;
1832 
1833 	do {
1834 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
1835 		if (flags) {
1836 			m->m_pkthdr.len = 0;
1837 			/* Leave room for protocol headers. */
1838 			if (resid < MHLEN)
1839 				MH_ALIGN(m, resid);
1840 			flags = 0;
1841 		}
1842 		m->m_len = min(nsize, resid);
1843 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
1844 		if (error) {
1845 			m_free(m);
1846 			goto failed;
1847 		}
1848 		*mp = m;
1849 		mp = &m->m_next;
1850 		head->m_pkthdr.len += m->m_len;
1851 		resid -= m->m_len;
1852 	} while (resid > 0);
1853 
1854 	return (head);
1855 
1856 failed:
1857 	m_freem(head);
1858 	return (NULL);
1859 }
1860 
1861 struct mbuf *
1862 m_last(struct mbuf *m)
1863 {
1864 	while (m->m_next)
1865 		m = m->m_next;
1866 	return (m);
1867 }
1868 
1869 /*
1870  * Return the number of bytes in an mbuf chain.
1871  * If lastm is not NULL, also return the last mbuf.
1872  */
1873 u_int
1874 m_lengthm(struct mbuf *m, struct mbuf **lastm)
1875 {
1876 	u_int len = 0;
1877 	struct mbuf *prev = m;
1878 
1879 	while (m) {
1880 		len += m->m_len;
1881 		prev = m;
1882 		m = m->m_next;
1883 	}
1884 	if (lastm != NULL)
1885 		*lastm = prev;
1886 	return (len);
1887 }
1888 
1889 /*
1890  * Like m_lengthm(), except also keep track of mbuf usage.
1891  */
1892 u_int
1893 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
1894 {
1895 	u_int len = 0, mbcnt = 0;
1896 	struct mbuf *prev = m;
1897 
1898 	while (m) {
1899 		len += m->m_len;
1900 		mbcnt += MSIZE;
1901 		if (m->m_flags & M_EXT)
1902 			mbcnt += m->m_ext.ext_size;
1903 		prev = m;
1904 		m = m->m_next;
1905 	}
1906 	if (lastm != NULL)
1907 		*lastm = prev;
1908 	*pmbcnt = mbcnt;
1909 	return (len);
1910 }
1911