xref: /dragonfly/sys/kern/uipc_mbuf.c (revision 6700dd34)
/*
 * (MPSAFE)
 *
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin, "mbuf_track_spin");

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif
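
/*
 * Illustrative sketch (not part of the original source): with MBUF_DEBUG
 * compiled in, a subsystem can tag every mbuf of a packet chain with an
 * arbitrary id and inspect the tags later from userland.  The id value
 * 42 below is made up.
 *
 *	mbuftrackid(m, 42);		(tag the whole chain)
 *
 *	$ sysctl kern.ipc.showmbufs	(dumps "mbuf %p track %d" lines)
 */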

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL);

struct mbtypes_stat {
	u_long	stats[MT_NTYPES];
} __cachealign;

static struct mbtypes_stat	mbtypes[SMP_MAXCPU];

static struct mbstat mbstat[SMP_MAXCPU] __cachealign;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

struct lock	mbupdate_lk = LOCK_INITIALIZER("mbupdate", 0, LK_CANRECURSE);

int		nmbclusters;
static int	nmbjclusters;
int		nmbufs;

static int	mjclph_cachefrac;
static int	mjcl_cachefrac;
static int	mclph_cachefrac;
static int	mcl_cachefrac;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	"Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_jclusters += mbstat[i].m_jclusters;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i].stats[j];
	}

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * The variables may be set as boot-time tunables or live.  Setting these
 * values too low can deadlock your network.  Network interfaces may also
 * adjust nmbclusters and/or nmbjclusters to account for preloading the
 * hardware rings.
 */
static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLTYPE_INT | CTLFLAG_RW,
	   0, 0, sysctl_nmbclusters, "I",
	   "Maximum number of mbuf clusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjclusters, CTLTYPE_INT | CTLFLAG_RW,
	   0, 0, sysctl_nmbjclusters, "I",
	   "Maximum number of mbuf jclusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW,
	   0, 0, sysctl_nmbufs, "I",
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, mjclph_cachefrac, CTLFLAG_RD,
	   &mjclph_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mjcl_cachefrac, CTLFLAG_RD,
	   &mjcl_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters");
SYSCTL_INT(_kern_ipc, OID_AUTO, mclph_cachefrac, CTLFLAG_RD,
	   &mclph_cachefrac, 0,
	   "Fraction of cacheable mbuf clusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_cachefrac, CTLFLAG_RD,
	   &mcl_cachefrac, 0, "Fraction of cacheable mbuf clusters");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim (void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
static void m_mjclfree(void *arg);

static void mbupdatelimits(void);

/*
 * Generally scale default mbufs to maxproc.
 *
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxproc * 4)
#endif
#ifndef BASE_CACHEFRAC
#define BASE_CACHEFRAC	16
#endif
#ifndef MJCLPH_CACHEFRAC
#define MJCLPH_CACHEFRAC (BASE_CACHEFRAC * 2)
#endif
#ifndef MJCL_CACHEFRAC
#define MJCL_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef MCLPH_CACHEFRAC
#define MCLPH_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef MCL_CACHEFRAC
#define MCL_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef NMBJCLUSTERS
#define NMBJCLUSTERS	(NMBCLUSTERS / 4)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters / 2 + maxfiles)
#endif

#define NMBCLUSTERS_MIN	(NMBCLUSTERS / 2)
#define NMBJCLUSTERS_MIN (NMBJCLUSTERS / 2)
#define NMBUFS_MIN	(NMBUFS / 2)

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	mjclph_cachefrac = MJCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjclph_cachefrac", &mjclph_cachefrac);
	mjcl_cachefrac = MJCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjcl_cachefrac", &mjcl_cachefrac);
	mclph_cachefrac = MCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mclph_cachefrac", &mclph_cachefrac);
	mcl_cachefrac = MCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mcl_cachefrac", &mcl_cachefrac);

	/*
	 * WARNING! Each mcl cache feeds two mbuf caches, so the minimum
	 *	    cachefrac is 2.  For safety, use 3.
	 */
	if (mjclph_cachefrac < 3)
		mjclph_cachefrac = 3;
	if (mjcl_cachefrac < 3)
		mjcl_cachefrac = 3;
	if (mclph_cachefrac < 3)
		mclph_cachefrac = 3;
	if (mcl_cachefrac < 3)
		mcl_cachefrac = 3;

	nmbjclusters = NMBJCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters);

	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

static void
mbinclimit(int *limit, int inc, int minlim)
{
	int new_limit;

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);

	new_limit = *limit + inc;
	if (new_limit < minlim)
		new_limit = minlim;

	if (*limit != new_limit) {
		*limit = new_limit;
		mbupdatelimits();
	}

	lockmgr(&mbupdate_lk, LK_RELEASE);
}

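/*
 * NOTE: mbsetlimit() below holds mbupdate_lk across its call to
 * mbinclimit(), which acquires the same lock again; this works only
 * because mbupdate_lk is initialized with LK_CANRECURSE above.
 */
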
static int
mbsetlimit(int *limit, int new_limit, int minlim)
{
	if (new_limit < minlim)
		return EINVAL;

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);
	mbinclimit(limit, new_limit - *limit, minlim);
	lockmgr(&mbupdate_lk, LK_RELEASE);
	return 0;
}

static int
sysctl_mblimit(SYSCTL_HANDLER_ARGS, int *limit, int minlim)
{
	int error, value;

	value = *limit;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return error;

	return mbsetlimit(limit, value, minlim);
}

/*
 * Sysctl support to update nmbclusters, nmbjclusters, and nmbufs.
 */
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbclusters,
	    NMBCLUSTERS_MIN);
}

static int
sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbjclusters,
	    NMBJCLUSTERS_MIN);
}

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	return sysctl_mblimit(oidp, arg1, arg2, req, &nmbufs, NMBUFS_MIN);
}

void
mcl_inclimit(int inc)
{
	mbinclimit(&nmbclusters, inc, NMBCLUSTERS_MIN);
}

void
mjcl_inclimit(int inc)
{
	mbinclimit(&nmbjclusters, inc, NMBJCLUSTERS_MIN);
}

void
mb_inclimit(int inc)
{
	mbinclimit(&nmbufs, inc, NMBUFS_MIN);
}

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}
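
/*
 * Illustrative note (not part of the original source): any consumer of a
 * raw constructed mbuf must therefore initialize the length fields itself,
 * exactly as m_get() and m_gethdr() below do:
 *
 *	m->m_len = 0;		(and m->m_pkthdr.len = 0 for pkthdr mbufs)
 */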

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	if (size != MCLBYTES)
		m->m_ext.ext_free = m_mjclfree;
	else
		m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		mbstat[i].m_msize = MSIZE;
		mbstat[i].m_mclbytes = MCLBYTES;
		mbstat[i].m_mjumpagesize = MJUMPAGESIZE;
		mbstat[i].m_minclsize = MINCLSIZE;
		mbstat[i].m_mlen = MLEN;
		mbstat[i].m_mhlen = MHLEN;
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    limit, nmbufs / BASE_CACHEFRAC,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr",
	    limit, nmbufs / BASE_CACHEFRAC,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf",
	    ncl_limit, nmbclusters / BASE_CACHEFRAC,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf",
	    jcl_limit, nmbjclusters / BASE_CACHEFRAC,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster",
	    limit, nmbclusters / mcl_cachefrac,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    limit, nmbclusters / mclph_cachefrac,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufjcluster_cache = objcache_create("mbuf + jcluster",
	    limit, nmbjclusters / mjcl_cachefrac,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    limit, nmbjclusters / mjclph_cachefrac,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}

/*
 * Adjust mbuf limits after changes have been made
 *
 * Caller must hold mbupdate_lk
 */
static void
mbupdatelimits(void)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;

	KASSERT(lockstatus(&mbupdate_lk, curthread) != 0,
	    ("mbupdate_lk is not held"));

	/*
	 * Figure out adjustments to object caches after nmbufs, nmbclusters,
	 * or nmbjclusters has been modified.
	 */
	mb_limit = cl_limit = 0;

	limit = nmbufs;
	objcache_set_cluster_limit(mbuf_cache, limit);
	mb_limit += limit;

	limit = nmbufs;
	objcache_set_cluster_limit(mbufphdr_cache, limit);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	objcache_set_cluster_limit(mclmeta_cache, ncl_limit);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	objcache_set_cluster_limit(mjclmeta_cache, jcl_limit);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufcluster_cache, limit);
	mb_limit += limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufphdrcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufjcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufphdrjcluster_cache, limit);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));
	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}
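
/*
 * Illustrative sketch (not part of the original source): a typical
 * writability test built on m_sharecount().  A count of exactly 1 means
 * the cluster is privately owned; 99 flags a special external buffer
 * whose sharing state is unknown and must be treated as shared.
 *
 *	if (m_sharecount(m) > 1) {
 *		... data may be shared, copy before modifying ...
 *	}
 */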

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	++mbtypes[gd->gd_cpuid].stats[type];
	--mbtypes[gd->gd_cpuid].stats[m->m_type];
	m->m_type = type;
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	++mbstat[mycpu->gd_cpuid].m_drain;
}

static __inline void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	++mbtypes[gd->gd_cpuid].stats[type];
	++mbstat[gd->gd_cpuid].m_mbufs;
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MB_OCFLAG(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}
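
/*
 * Illustrative sketch (not part of the original source): the common
 * allocate, fill, free pattern for a packet-header mbuf.  MT_DATA and
 * the 64-byte payload size are arbitrary choices for the example.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = 64;
 *	bzero(mtod(m, void *), 64);
 *	...
 *	m_freem(m);
 */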

/*
 * Get a mbuf (not a mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

static struct mbuf *
m_getcl_cache(int how, short type, int flags, struct objcache *mbclc,
    struct objcache *mbphclc, u_long *cl_stats)
{
	struct mbuf *m = NULL;
	int ocflags = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((ocflags & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}

#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	++mbtypes[mycpu->gd_cpuid].stats[type];
	++(*cl_stats);
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct objcache *mbclc, *mbphclc;
	u_long *cl_stats;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_clusters;
		break;

	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_jclusters;
		break;
	}
	return m_getcl_cache(how, type, flags, mbclc, mbphclc, cl_stats);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer heavily, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return m_getcl_cache(how, type, flags,
	    mbufcluster_cache, mbufphdrcluster_cache,
	    &mbstat[mycpu->gd_cpuid].m_clusters);
}
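
/*
 * Illustrative sketch (not part of the original source): typical
 * driver-style receive-buffer allocation, an mbuf with a 2K cluster
 * already attached.
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	(hand mtod(m, void *) to the DMA engine)
 */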

/*
 * Allocate chain of requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain. If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}
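
/*
 * Illustrative sketch (not part of the original source): the replacement
 * the deprecation note above suggests, with len and m0 meaning the same
 * as in m_getm().  The extension chain is built with m_getc() and spliced
 * on with m_cat().
 *
 *	struct mbuf *tail;
 *
 *	tail = m_getc(len, M_WAITOK, MT_DATA);
 *	if (tail != NULL)
 *		m_cat(m0, tail);
 */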

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MB_OCFLAG(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		++mbstat[mycpu->gd_cpuid].m_clusters;
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with an N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_clusters;
		objcache_put(mclmeta_cache, mcl);
	}
}

static void
m_mjclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_jclusters;
		objcache_put(mjclmeta_cache, mcl);
	}
}
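
/*
 * Worked example (commentary, not part of the original source): with
 * mcl_refs == 2 and two CPUs releasing concurrently, atomic_fetchadd_int()
 * serializes the decrements; one caller observes the old value 2 and does
 * nothing, the other observes 1 and is therefore the unique last
 * dereferencer that returns the cluster to its objcache.  A non-atomic
 * read-modify-write could let both see 2 (leaking the cluster) or both
 * see 1 (freeing it twice).
 */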

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check the non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	--mbtypes[gd->gd_cpuid].stats[m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_jclusters;
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_clusters;
			}
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		--mbstat[mycpu->gd_cpuid].m_mbufs;
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif

void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
    void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}
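
/*
 * Illustrative sketch (not part of the original source): attaching a
 * driver-owned buffer to an mbuf with m_extadd().  struct mybuf and the
 * mybuf_ref()/mybuf_free() callbacks are hypothetical; a real
 * implementation must keep the buffer alive until the free callback runs.
 *
 *	static void mybuf_ref(void *arg)  { ... bump arg's refcount ... }
 *	static void mybuf_free(void *arg) { ... drop ref, free at zero ... }
 *
 *	struct mbuf *m = m_get(M_WAITOK, MT_DATA);
 *	struct mybuf *mb = ...;
 *
 *	m_extadd(m, mb->data, mb->size, mybuf_ref, mybuf_free, mb);
 */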

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
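
/*
 * Illustrative sketch (not part of the original source): taking a
 * reference-counted, read-only snapshot of a whole packet, e.g. before
 * handing the original to a consumer that may free it.
 *
 *	struct mbuf *snap;
 *
 *	snap = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (snap == NULL)
 *		... allocation failed, m is untouched ...
 *
 * snap shares m's clusters; use m_dup() if a writable copy is needed.
 */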

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
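
/*
 * Illustrative sketch (not part of the original source): linearizing a
 * header that may straddle mbuf boundaries into a stack buffer.  struct ip
 * is used only as a familiar example.
 *
 *	struct ip iphdr;
 *
 *	if (m->m_pkthdr.len >= sizeof(iphdr))
 *		m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */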

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
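
/*
 * Illustrative sketch (not part of the original source): choosing between
 * the two packet-copy primitives above.
 *
 *	struct mbuf *ro = m_copypacket(m, M_NOWAIT);   (shares clusters)
 *	struct mbuf *rw = m_dup(m, M_NOWAIT);          (deep, writable copy)
 *
 * A writable copy is required before modifying data in place, e.g. for
 * encryption or checksum rewriting, whenever m_sharecount() indicates the
 * data may be shared.
 */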

/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be M_WAITOK or M_NOWAIT
 * and NULL can be returned if M_NOWAIT is passed.
 *
 * Be careful to use cluster mbufs, a large mbuf chain converted to non
 * cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN)
		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	else
		gsize = MLEN;

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return(top);
nospace:
	*p = NULL;
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
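
/*
 * Illustrative sketch (not part of the original source): the sign
 * convention of m_adj().  A positive length trims from the head, a
 * negative length from the tail; an Ethernet header and trailing CRC
 * are used as examples.
 *
 *	m_adj(m, ETHER_HDR_LEN);	(strip link header from the front)
 *	m_adj(m, -ETHER_CRC_LEN);	(strip trailing CRC)
 */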
1944 
1945 /*
1946  * Set the m_data pointer of a newly-allocated mbuf
1947  * to place an object of the specified size at the
1948  * end of the mbuf, longword aligned.
1949  */
1950 void
1951 m_align(struct mbuf *m, int len)
1952 {
1953 	int adjust;
1954 
1955 	if (m->m_flags & M_EXT)
1956 		adjust = m->m_ext.ext_size - len;
1957 	else if (m->m_flags & M_PKTHDR)
1958 		adjust = MHLEN - len;
1959 	else
1960 		adjust = MLEN - len;
1961 	m->m_data += adjust &~ (sizeof(long)-1);
1962 }
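
/*
 * Usage sketch (illustrative): building a reply back to front.  Placing
 * the payload at the end of a fresh mbuf leaves maximal leading space
 * for headers to be prepended later; `struct reply' is hypothetical.
 *
 *	MGETHDR(m, M_NOWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_align(m, sizeof(struct reply));
 *		m->m_len = m->m_pkthdr.len = sizeof(struct reply);
 *	}
 */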
1963 
1964 /*
1965  * Create a writable copy of the mbuf chain.  While doing this
1966  * we compact the chain with a goal of producing a chain with
1967  * at most two mbufs.  The second mbuf in this chain is likely
1968  * to be a cluster.  The primary purpose of this work is to create
1969  * a writable packet for encryption, compression, etc.  The
1970  * secondary goal is to linearize the data so the data can be
1971  * passed to crypto hardware in the most efficient manner possible.
1972  */
1973 struct mbuf *
1974 m_unshare(struct mbuf *m0, int how)
1975 {
1976 	struct mbuf *m, *mprev;
1977 	struct mbuf *n, *mfirst, *mlast;
1978 	int len, off;
1979 
1980 	mprev = NULL;
1981 	for (m = m0; m != NULL; m = mprev->m_next) {
1982 		/*
1983 		 * Regular mbufs are ignored unless there's a cluster
1984 		 * in front of them that we can use to coalesce.  We do
1985 		 * the latter mainly so later clusters can be coalesced
1986 		 * also without having to handle them specially (i.e. convert
1987 		 * mbuf+cluster -> cluster).  This optimization is heavily
1988 		 * influenced by the assumption that we're running over
1989 		 * Ethernet where MCLBYTES is large enough that the max
1990 		 * packet size will permit lots of coalescing into a
1991 		 * single cluster.  This in turn permits efficient
1992 		 * crypto operations, especially when using hardware.
1993 		 */
1994 		if ((m->m_flags & M_EXT) == 0) {
1995 			if (mprev && (mprev->m_flags & M_EXT) &&
1996 			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1997 				/* XXX: this ignores mbuf types */
1998 				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1999 				       mtod(m, caddr_t), m->m_len);
2000 				mprev->m_len += m->m_len;
2001 				mprev->m_next = m->m_next;	/* unlink from chain */
2002 				m_free(m);			/* reclaim mbuf */
2003 			} else {
2004 				mprev = m;
2005 			}
2006 			continue;
2007 		}
2008 		/*
2009 		 * Writable mbufs are left alone (for now).
2010 		 */
2011 		if (M_WRITABLE(m)) {
2012 			mprev = m;
2013 			continue;
2014 		}
2015 
2016 		/*
2017 		 * Not writable, replace with a copy or coalesce with
2018 		 * the previous mbuf if possible (since we have to copy
2019 		 * it anyway, we try to reduce the number of mbufs and
2020 		 * clusters so that future work is easier).
2021 		 */
2022 		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
2023 		/* NB: we only coalesce into a cluster or larger */
2024 		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
2025 		    m->m_len <= M_TRAILINGSPACE(mprev)) {
2026 			/* XXX: this ignores mbuf types */
2027 			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
2028 			       mtod(m, caddr_t), m->m_len);
2029 			mprev->m_len += m->m_len;
2030 			mprev->m_next = m->m_next;	/* unlink from chain */
2031 			m_free(m);			/* reclaim mbuf */
2032 			continue;
2033 		}
2034 
2035 		/*
2036 		 * Allocate new space to hold the copy...
2037 		 */
2038 		/* XXX why can M_PKTHDR be set past the first mbuf? */
2039 		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
2040 			/*
2041 			 * NB: if a packet header is present we must
2042 			 * allocate the mbuf separately from any cluster
2043 			 * because M_MOVE_PKTHDR will smash the data
2044 			 * pointer and drop the M_EXT marker.
2045 			 */
2046 			MGETHDR(n, how, m->m_type);
2047 			if (n == NULL) {
2048 				m_freem(m0);
2049 				return (NULL);
2050 			}
2051 			M_MOVE_PKTHDR(n, m);
2052 			MCLGET(n, how);
2053 			if ((n->m_flags & M_EXT) == 0) {
2054 				m_free(n);
2055 				m_freem(m0);
2056 				return (NULL);
2057 			}
2058 		} else {
2059 			n = m_getcl(how, m->m_type, m->m_flags);
2060 			if (n == NULL) {
2061 				m_freem(m0);
2062 				return (NULL);
2063 			}
2064 		}
2065 		/*
2066 		 * ... and copy the data.  We deal with jumbo mbufs
2067 		 * (i.e. m_len > MCLBYTES) by splitting them into
2068 		 * clusters.  We could just malloc a buffer and make
2069 		 * it external but too many device drivers don't know
2070 		 * how to break up the non-contiguous memory when
2071 		 * doing DMA.
2072 		 */
2073 		len = m->m_len;
2074 		off = 0;
2075 		mfirst = n;
2076 		mlast = NULL;
2077 		for (;;) {
2078 			int cc = min(len, MCLBYTES);
2079 			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
2080 			n->m_len = cc;
2081 			if (mlast != NULL)
2082 				mlast->m_next = n;
2083 			mlast = n;
2084 
2085 			len -= cc;
2086 			if (len <= 0)
2087 				break;
2088 			off += cc;
2089 
2090 			n = m_getcl(how, m->m_type, m->m_flags);
2091 			if (n == NULL) {
2092 				m_freem(mfirst);
2093 				m_freem(m0);
2094 				return (NULL);
2095 			}
2096 		}
2097 		n->m_next = m->m_next;
2098 		if (mprev == NULL)
2099 			m0 = mfirst;		/* new head of chain */
2100 		else
2101 			mprev->m_next = mfirst;	/* replace old mbuf */
2102 		m_free(m);			/* release old mbuf */
2103 		mprev = mfirst;
2104 	}
2105 	return (m0);
2106 }
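
/*
 * Usage sketch (illustrative): an IPsec-style output path makes the
 * chain writable before encrypting in place.  On failure the original
 * chain has already been freed, so only the error is propagated.
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */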
2107 
2108 /*
2109  * Rearrange an mbuf chain so that len bytes are contiguous
2110  * and in the data area of an mbuf (so that mtod will work for a structure
2111  * of size len).  Returns the resulting mbuf chain on success, frees it and
2112  * returns NULL on failure.  If there is room, it will add up to
2113  * max_protohdr-len extra bytes to the contiguous region in an attempt to
2114  * avoid being called next time.
2115  */
2116 struct mbuf *
2117 m_pullup(struct mbuf *n, int len)
2118 {
2119 	struct mbuf *m;
2120 	int count;
2121 	int space;
2122 
2123 	/*
2124 	 * If first mbuf has no cluster, and has room for len bytes
2125 	 * without shifting current data, pullup into it,
2126 	 * otherwise allocate a new mbuf to prepend to the chain.
2127 	 */
2128 	if (!(n->m_flags & M_EXT) &&
2129 	    n->m_data + len < &n->m_dat[MLEN] &&
2130 	    n->m_next) {
2131 		if (n->m_len >= len)
2132 			return (n);
2133 		m = n;
2134 		n = n->m_next;
2135 		len -= m->m_len;
2136 	} else {
2137 		if (len > MHLEN)
2138 			goto bad;
2139 		if (n->m_flags & M_PKTHDR)
2140 			m = m_gethdr(M_NOWAIT, n->m_type);
2141 		else
2142 			m = m_get(M_NOWAIT, n->m_type);
2143 		if (m == NULL)
2144 			goto bad;
2145 		m->m_len = 0;
2146 		if (n->m_flags & M_PKTHDR)
2147 			M_MOVE_PKTHDR(m, n);
2148 	}
2149 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
2150 	do {
2151 		count = min(min(max(len, max_protohdr), space), n->m_len);
2152 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
2153 		  (unsigned)count);
2154 		len -= count;
2155 		m->m_len += count;
2156 		n->m_len -= count;
2157 		space -= count;
2158 		if (n->m_len)
2159 			n->m_data += count;
2160 		else
2161 			n = m_free(n);
2162 	} while (len > 0 && n);
2163 	if (len > 0) {
2164 		m_free(m);
2165 		goto bad;
2166 	}
2167 	m->m_next = n;
2168 	return (m);
2169 bad:
2170 	m_freem(n);
2171 	++mbstat[mycpu->gd_cpuid].m_mcfail;
2172 	return (NULL);
2173 }
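
/*
 * Usage sketch (illustrative): the classic protocol-input pattern,
 * making a fixed-size header contiguous before casting it with mtod().
 * On failure the chain has already been freed.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */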
2174 
2175 /*
2176  * Partition an mbuf chain in two pieces, returning the tail --
2177  * all but the first len0 bytes.  In case of failure, it returns NULL and
2178  * attempts to restore the chain to its original state.
2179  *
2180  * Note that the resulting mbufs might be read-only, because the new
2181  * mbuf can end up sharing an mbuf cluster with the original mbuf if
2182  * the "breaking point" happens to lie within a cluster mbuf. Use the
2183  * M_WRITABLE() macro to check for this case.
2184  */
2185 struct mbuf *
2186 m_split(struct mbuf *m0, int len0, int wait)
2187 {
2188 	struct mbuf *m, *n;
2189 	unsigned len = len0, remain;
2190 
2191 	for (m = m0; m && len > m->m_len; m = m->m_next)
2192 		len -= m->m_len;
2193 	if (m == NULL)
2194 		return (NULL);
2195 	remain = m->m_len - len;
2196 	if (m0->m_flags & M_PKTHDR) {
2197 		n = m_gethdr(wait, m0->m_type);
2198 		if (n == NULL)
2199 			return (NULL);
2200 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
2201 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
2202 		m0->m_pkthdr.len = len0;
2203 		if (m->m_flags & M_EXT)
2204 			goto extpacket;
2205 		if (remain > MHLEN) {
2206 			/* m can't be the lead packet */
2207 			MH_ALIGN(n, 0);
2208 			n->m_next = m_split(m, len, wait);
2209 			if (n->m_next == NULL) {
2210 				m_free(n);
2211 				return (NULL);
2212 			} else {
2213 				n->m_len = 0;
2214 				return (n);
2215 			}
2216 		} else
2217 			MH_ALIGN(n, remain);
2218 	} else if (remain == 0) {
2219 		n = m->m_next;
2220 		m->m_next = NULL;
2221 		return (n);
2222 	} else {
2223 		n = m_get(wait, m->m_type);
2224 		if (n == NULL)
2225 			return (NULL);
2226 		M_ALIGN(n, remain);
2227 	}
2228 extpacket:
2229 	if (m->m_flags & M_EXT) {
2230 		KKASSERT((n->m_flags & M_EXT) == 0);
2231 		n->m_data = m->m_data + len;
2232 		m->m_ext.ext_ref(m->m_ext.ext_arg);
2233 		n->m_ext = m->m_ext;
2234 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
2235 	} else {
2236 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
2237 	}
2238 	n->m_len = remain;
2239 	m->m_len = len;
2240 	n->m_next = m->m_next;
2241 	m->m_next = NULL;
2242 	return (n);
2243 }
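
/*
 * Usage sketch (illustrative): chopping a packet at a fragment
 * boundary; `fraglen' is hypothetical.  On success `m' keeps the
 * first fraglen bytes and `tail' holds the rest; on failure `m'
 * is restored, so it can still be freed or retried.
 *
 *	tail = m_split(m, fraglen, M_NOWAIT);
 *	if (tail == NULL)
 *		goto drop;
 */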
2244 
2245 /*
2246  * Routine to copy from device local memory into mbufs.
2247  * Note: "offset" is ill-defined and always called as 0, so ignore it.
2248  */
2249 struct mbuf *
2250 m_devget(char *buf, int len, int offset, struct ifnet *ifp)
2251 {
2252 	struct mbuf *m, *mfirst = NULL, **mtail;
2253 	int nsize, flags;
2254 
2255 	mtail = &mfirst;
2256 	flags = M_PKTHDR;
2257 
2258 	while (len > 0) {
2259 		m = m_getl(len, M_NOWAIT, MT_DATA, flags, &nsize);
2260 		if (m == NULL) {
2261 			m_freem(mfirst);
2262 			return (NULL);
2263 		}
2264 		m->m_len = min(len, nsize);
2265 
2266 		if (flags & M_PKTHDR) {
2267 			if (len + max_linkhdr <= nsize)
2268 				m->m_data += max_linkhdr;
2269 			m->m_pkthdr.rcvif = ifp;
2270 			m->m_pkthdr.len = len;
2271 			flags = 0;
2272 		}
2273 
2274 		bcopy(buf, m->m_data, (unsigned)m->m_len);
2275 		buf += m->m_len;
2276 		len -= m->m_len;
2277 		*mtail = m;
2278 		mtail = &m->m_next;
2279 	}
2280 
2281 	return (mfirst);
2282 }
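
/*
 * Usage sketch (illustrative): a receive path copying a frame out of
 * device-local memory; `sc->rx_buf' and `pktlen' are hypothetical
 * driver state.
 *
 *	m = m_devget(sc->rx_buf, pktlen, 0, ifp);
 *	if (m != NULL)
 *		... hand the packet to the interface input routine ...
 */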
2283 
2284 /*
2285  * Routine to pad mbuf to the specified length 'padto'.
2286  */
2287 int
2288 m_devpad(struct mbuf *m, int padto)
2289 {
2290 	struct mbuf *last = NULL;
2291 	int padlen;
2292 
2293 	if (padto <= m->m_pkthdr.len)
2294 		return 0;
2295 
2296 	padlen = padto - m->m_pkthdr.len;
2297 
2298 	/* If the chain is a single mbuf with room for the pad, pad in place. */
2299 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
2300 		last = m;
2301 	} else {
2302 		/*
2303 		 * Walk packet chain to find last mbuf. We will either
2304 		 * pad there, or append a new mbuf and pad it.
2305 		 */
2306 		for (last = m; last->m_next != NULL; last = last->m_next)
2307 			; /* EMPTY */
2308 
2309 		/* `last' now points to last in chain. */
2310 		if (M_TRAILINGSPACE(last) < padlen) {
2311 			struct mbuf *n;
2312 
2313 			/* Allocate new empty mbuf, pad it.  Compact later. */
2314 			MGET(n, M_NOWAIT, MT_DATA);
2315 			if (n == NULL)
2316 				return ENOBUFS;
2317 			n->m_len = 0;
2318 			last->m_next = n;
2319 			last = n;
2320 		}
2321 	}
2322 	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
2323 	KKASSERT(M_WRITABLE(last));
2324 
2325 	/* Now zero the pad area */
2326 	bzero(mtod(last, char *) + last->m_len, padlen);
2327 	last->m_len += padlen;
2328 	m->m_pkthdr.len += padlen;
2329 	return 0;
2330 }
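
/*
 * Usage sketch (illustrative): padding a runt Ethernet frame up to the
 * minimum wire length (FCS excluded) before transmission.
 *
 *	if (m->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN &&
 *	    m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN) != 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */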
2331 
2332 /*
2333  * Copy data from a buffer back into the indicated mbuf chain,
2334  * starting "off" bytes from the beginning, extending the mbuf
2335  * chain if necessary.
2336  */
2337 void
2338 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
2339 {
2340 	int mlen;
2341 	struct mbuf *m = m0, *n;
2342 	int totlen = 0;
2343 
2344 	if (m0 == NULL)
2345 		return;
2346 	while (off > (mlen = m->m_len)) {
2347 		off -= mlen;
2348 		totlen += mlen;
2349 		if (m->m_next == NULL) {
2350 			n = m_getclr(M_NOWAIT, m->m_type);
2351 			if (n == NULL)
2352 				goto out;
2353 			n->m_len = min(MLEN, len + off);
2354 			m->m_next = n;
2355 		}
2356 		m = m->m_next;
2357 	}
2358 	while (len > 0) {
2359 		mlen = min(m->m_len - off, len);
2360 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
2361 		cp += mlen;
2362 		len -= mlen;
2363 		mlen += off;
2364 		off = 0;
2365 		totlen += mlen;
2366 		if (len == 0)
2367 			break;
2368 		if (m->m_next == NULL) {
2369 			n = m_get(M_NOWAIT, m->m_type);
2370 			if (n == NULL)
2371 				break;
2372 			n->m_len = min(MLEN, len);
2373 			m->m_next = n;
2374 		}
2375 		m = m->m_next;
2376 	}
2377 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
2378 		m->m_pkthdr.len = totlen;
2379 }
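
/*
 * Usage sketch (illustrative): patching a freshly computed checksum
 * back into a header that may straddle mbufs; `off' and `csum' are
 * hypothetical.
 *
 *	m_copyback(m, off + offsetof(struct udphdr, uh_sum),
 *	    sizeof(csum), (caddr_t)&csum);
 */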
2380 
2381 /*
2382  * Append the specified data to the indicated mbuf chain.
2383  * Extend the mbuf chain if the new data does not fit in
2384  * existing space.
2385  *
2386  * Return 1 if able to complete the job; otherwise 0.
2387  */
2388 int
2389 m_append(struct mbuf *m0, int len, c_caddr_t cp)
2390 {
2391 	struct mbuf *m, *n;
2392 	int remainder, space;
2393 
2394 	for (m = m0; m->m_next != NULL; m = m->m_next)
2395 		;
2396 	remainder = len;
2397 	space = M_TRAILINGSPACE(m);
2398 	if (space > 0) {
2399 		/*
2400 		 * Copy into available space.
2401 		 */
2402 		if (space > remainder)
2403 			space = remainder;
2404 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
2405 		m->m_len += space;
2406 		cp += space, remainder -= space;
2407 	}
2408 	while (remainder > 0) {
2409 		/*
2410 		 * Allocate a new mbuf; could check space
2411 		 * and allocate a cluster instead.
2412 		 */
2413 		n = m_get(M_NOWAIT, m->m_type);
2414 		if (n == NULL)
2415 			break;
2416 		n->m_len = min(MLEN, remainder);
2417 		bcopy(cp, mtod(n, caddr_t), n->m_len);
2418 		cp += n->m_len, remainder -= n->m_len;
2419 		m->m_next = n;
2420 		m = n;
2421 	}
2422 	if (m0->m_flags & M_PKTHDR)
2423 		m0->m_pkthdr.len += len - remainder;
2424 	return (remainder == 0);
2425 }
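
/*
 * Usage sketch (illustrative): appending a fixed trailer.  The return
 * value must be checked, since a failed allocation leaves the copy
 * short (m_pkthdr.len then covers only the bytes actually appended).
 *
 *	if (!m_append(m, sizeof(trailer), (c_caddr_t)&trailer)) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */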
2426 
2427 /*
2428  * Apply function f to the data in an mbuf chain starting "off" bytes from
2429  * the beginning, continuing for "len" bytes.
2430  */
2431 int
2432 m_apply(struct mbuf *m, int off, int len,
2433     int (*f)(void *, void *, u_int), void *arg)
2434 {
2435 	u_int count;
2436 	int rval;
2437 
2438 	KASSERT(off >= 0, ("m_apply, negative off %d", off));
2439 	KASSERT(len >= 0, ("m_apply, negative len %d", len));
2440 	while (off > 0) {
2441 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2442 		if (off < m->m_len)
2443 			break;
2444 		off -= m->m_len;
2445 		m = m->m_next;
2446 	}
2447 	while (len > 0) {
2448 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2449 		count = min(m->m_len - off, len);
2450 		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
2451 		if (rval)
2452 			return (rval);
2453 		len -= count;
2454 		off = 0;
2455 		m = m->m_next;
2456 	}
2457 	return (0);
2458 }
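
/*
 * Usage sketch (illustrative): summing payload bytes without forcing
 * the chain contiguous; `sumfn' is a hypothetical callback.  Returning
 * non-zero from the callback aborts the walk and is passed through.
 *
 *	static int
 *	sumfn(void *arg, void *data, u_int len)
 *	{
 *		u_char *p = data;
 *
 *		while (len--)
 *			*(u_long *)arg += *p++;
 *		return (0);
 *	}
 *
 *	error = m_apply(m, 0, m->m_pkthdr.len, sumfn, &sum);
 */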
2459 
2460 /*
2461  * Return the mbuf and offset (via *off) of a byte location in an mbuf chain.
2462  */
2463 struct mbuf *
2464 m_getptr(struct mbuf *m, int loc, int *off)
2465 {
2466 
2467 	while (loc >= 0) {
2468 		/* Normal end of search. */
2469 		if (m->m_len > loc) {
2470 			*off = loc;
2471 			return (m);
2472 		} else {
2473 			loc -= m->m_len;
2474 			if (m->m_next == NULL) {
2475 				if (loc == 0) {
2476 					/* Point at the end of valid data. */
2477 					*off = m->m_len;
2478 					return (m);
2479 				}
2480 				return (NULL);
2481 			}
2482 			m = m->m_next;
2483 		}
2484 	}
2485 	return (NULL);
2486 }
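
/*
 * Usage sketch (illustrative): locating the mbuf that holds byte
 * `loc' of the chain before modifying it in place.
 *
 *	int off;
 *	struct mbuf *n = m_getptr(m, loc, &off);
 *
 *	if (n != NULL)
 *		mtod(n, u_char *)[off] = 0xff;
 */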
2487 
2488 void
2489 m_print(const struct mbuf *m)
2490 {
2491 	int len;
2492 	const struct mbuf *m2;
2493 	char *hexstr;
2494 
2495 	len = m->m_pkthdr.len;
2496 	m2 = m;
2497 	hexstr = kmalloc(HEX_NCPYLEN(len), M_TEMP, M_ZERO | M_WAITOK);
2498 	while (len) {
2499 		kprintf("%p %s\n", m2, hexncpy(m2->m_data, m2->m_len, hexstr,
2500 			HEX_NCPYLEN(m2->m_len), "-"));
2501 		len -= m2->m_len;
2502 		m2 = m2->m_next;
2503 	}
2504 	kfree(hexstr, M_TEMP);
2505 	return;
2506 }
2507 
2508 /*
2509  * "Move" mbuf pkthdr from "from" to "to".
2510  * "from" must have M_PKTHDR set, and "to" must be empty.
2511  */
2512 void
2513 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
2514 {
2515 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
2516 
2517 	to->m_flags |= from->m_flags & M_COPYFLAGS;
2518 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
2519 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
2520 }
2521 
2522 /*
2523  * Duplicate "from"'s mbuf pkthdr in "to".
2524  * "from" must have M_PKTHDR set, and "to" must be empty.
2525  * In particular, this does a deep copy of the packet tags.
2526  */
2527 int
2528 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
2529 {
2530 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
2531 
2532 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
2533 		      (to->m_flags & ~M_COPYFLAGS);
2534 	to->m_pkthdr = from->m_pkthdr;
2535 	SLIST_INIT(&to->m_pkthdr.tags);
2536 	return (m_tag_copy_chain(to, from, how));
2537 }
2538 
2539 /*
2540  * Defragment an mbuf chain, returning the shortest possible
2541  * chain of mbufs and clusters.  If allocation fails and
2542  * this cannot be completed, NULL will be returned, but
2543  * the passed in chain will be unchanged.  Upon success,
2544  * the original chain will be freed, and the new chain
2545  * will be returned.
2546  *
2547  * If a non-packet-header mbuf is passed in, the original
2548  * mbuf chain will be returned unharmed.
2549  *
2550  * m_defrag_nofree doesn't free the passed in mbuf.
2551  */
2552 struct mbuf *
2553 m_defrag(struct mbuf *m0, int how)
2554 {
2555 	struct mbuf *m_new;
2556 
2557 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
2558 		return (NULL);
2559 	if (m_new != m0)
2560 		m_freem(m0);
2561 	return (m_new);
2562 }
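
/*
 * Usage sketch (illustrative): a transmit path that ran out of DMA
 * segments retries with a compacted copy.  On success the original
 * chain is consumed (or returned as-is); on failure it is left
 * intact for the caller to dispose of.
 *
 *	struct mbuf *m_new = m_defrag(m, M_NOWAIT);
 *
 *	if (m_new == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = m_new;
 */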
2563 
2564 struct mbuf *
2565 m_defrag_nofree(struct mbuf *m0, int how)
2566 {
2567 	struct mbuf	*m_new = NULL, *m_final = NULL;
2568 	int		progress = 0, length, nsize;
2569 
2570 	if (!(m0->m_flags & M_PKTHDR))
2571 		return (m0);
2572 
2573 #ifdef MBUF_STRESS_TEST
2574 	if (m_defragrandomfailures) {
2575 		int temp = karc4random() & 0xff;
2576 		if (temp == 0xba)
2577 			goto nospace;
2578 	}
2579 #endif
2580 
2581 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
2582 	if (m_final == NULL)
2583 		goto nospace;
2584 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
2585 
2586 	if (m_dup_pkthdr(m_final, m0, how) == 0)
2587 		goto nospace;
2588 
2589 	m_new = m_final;
2590 
2591 	while (progress < m0->m_pkthdr.len) {
2592 		length = m0->m_pkthdr.len - progress;
2593 		if (length > MCLBYTES)
2594 			length = MCLBYTES;
2595 
2596 		if (m_new == NULL) {
2597 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
2598 			if (m_new == NULL)
2599 				goto nospace;
2600 		}
2601 
2602 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2603 		progress += length;
2604 		m_new->m_len = length;
2605 		if (m_new != m_final)
2606 			m_cat(m_final, m_new);
2607 		m_new = NULL;
2608 	}
2609 	if (m0->m_next == NULL)
2610 		m_defraguseless++;
2611 	m_defragpackets++;
2612 	m_defragbytes += m_final->m_pkthdr.len;
2613 	return (m_final);
2614 nospace:
2615 	m_defragfailure++;
2616 	if (m_new)
2617 		m_free(m_new);
2618 	m_freem(m_final);
2619 	return (NULL);
2620 }
2621 
2622 /*
2623  * Move data from uio into mbufs.
2624  */
2625 struct mbuf *
2626 m_uiomove(struct uio *uio)
2627 {
2628 	struct mbuf *m;			/* current working mbuf */
2629 	struct mbuf *head = NULL;	/* result mbuf chain */
2630 	struct mbuf **mp = &head;
2631 	int flags = M_PKTHDR;
2632 	int nsize;
2633 	int error;
2634 	int resid;
2635 
2636 	do {
2637 		if (uio->uio_resid > INT_MAX)
2638 			resid = INT_MAX;
2639 		else
2640 			resid = (int)uio->uio_resid;
2641 		m = m_getl(resid, M_WAITOK, MT_DATA, flags, &nsize);
2642 		if (flags) {
2643 			m->m_pkthdr.len = 0;
2644 			/* Leave room for protocol headers. */
2645 			if (resid < MHLEN)
2646 				MH_ALIGN(m, resid);
2647 			flags = 0;
2648 		}
2649 		m->m_len = imin(nsize, resid);
2650 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
2651 		if (error) {
2652 			m_free(m);
2653 			goto failed;
2654 		}
2655 		*mp = m;
2656 		mp = &m->m_next;
2657 		head->m_pkthdr.len += m->m_len;
2658 	} while (uio->uio_resid > 0);
2659 
2660 	return (head);
2661 
2662 failed:
2663 	m_freem(head);
2664 	return (NULL);
2665 }
2666 
2667 struct mbuf *
2668 m_last(struct mbuf *m)
2669 {
2670 	while (m->m_next)
2671 		m = m->m_next;
2672 	return (m);
2673 }
2674 
2675 /*
2676  * Return the number of bytes in an mbuf chain.
2677  * If lastm is not NULL, also return the last mbuf.
2678  */
2679 u_int
2680 m_lengthm(struct mbuf *m, struct mbuf **lastm)
2681 {
2682 	u_int len = 0;
2683 	struct mbuf *prev = m;
2684 
2685 	while (m) {
2686 		len += m->m_len;
2687 		prev = m;
2688 		m = m->m_next;
2689 	}
2690 	if (lastm != NULL)
2691 		*lastm = prev;
2692 	return (len);
2693 }
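
/*
 * Usage sketch (illustrative): measuring a chain while remembering the
 * tail, so a later append need not rescan; `pad' is hypothetical.
 *
 *	struct mbuf *last;
 *	u_int len = m_lengthm(m, &last);
 *
 *	if (M_TRAILINGSPACE(last) >= pad)
 *		... pad in place at `last' ...
 */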
2694 
2695 /*
2696  * Like m_lengthm(), except also keep track of mbuf usage.
2697  */
2698 u_int
2699 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2700 {
2701 	u_int len = 0, mbcnt = 0;
2702 	struct mbuf *prev = m;
2703 
2704 	while (m) {
2705 		len += m->m_len;
2706 		mbcnt += MSIZE;
2707 		if (m->m_flags & M_EXT)
2708 			mbcnt += m->m_ext.ext_size;
2709 		prev = m;
2710 		m = m->m_next;
2711 	}
2712 	if (lastm != NULL)
2713 		*lastm = prev;
2714 	*pmbcnt = mbcnt;
2715 	return (len);
2716 }
2717