xref: /dragonfly/sys/kern/uipc_mbuf.c (revision 0cfebe3d)
1 /*
2  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
3  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
4  *
5  * This code is derived from software contributed to The DragonFly Project
6  * by Jeffrey M. Hsu.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of The DragonFly Project nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific, prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1982, 1986, 1988, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
67  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
68  * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.65 2007/08/12 01:46:26 dillon Exp $
69  */
70 
71 #include "opt_param.h"
72 #include "opt_ddb.h"
73 #include "opt_mbuf_stress_test.h"
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/kernel.h>
79 #include <sys/sysctl.h>
80 #include <sys/domain.h>
81 #include <sys/objcache.h>
82 #include <sys/tree.h>
83 #include <sys/protosw.h>
84 #include <sys/uio.h>
85 #include <sys/thread.h>
86 #include <sys/globaldata.h>
87 #include <sys/thread2.h>
88 
89 #include <machine/atomic.h>
90 
91 #include <vm/vm.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 
95 #ifdef INVARIANTS
96 #include <machine/cpu.h>
97 #endif
98 
99 /*
100  * mbuf cluster meta-data
101  */
102 struct mbcluster {
103 	int32_t	mcl_refs;
104 	void	*mcl_data;
105 };
106 
107 /*
108  * mbuf tracking for debugging purposes
109  */
110 #ifdef MBUF_DEBUG
111 
112 static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
113 
114 struct mbtrack;
115 RB_HEAD(mbuf_rb_tree, mbtrack);
116 RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
117 
118 struct mbtrack {
119 	RB_ENTRY(mbtrack) rb_node;
120 	int trackid;
121 	struct mbuf *m;
122 };
123 
124 static int
125 mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
126 {
127 	if (mb1->m < mb2->m)
128 		return(-1);
129 	if (mb1->m > mb2->m)
130 		return(1);
131 	return(0);
132 }
133 
134 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
135 
136 struct mbuf_rb_tree	mbuf_track_root;
137 
138 static void
139 mbuftrack(struct mbuf *m)
140 {
141 	struct mbtrack *mbt;
142 
143 	crit_enter();
144 	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
145 	mbt->m = m;
146 	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt))
147 		panic("mbuftrack: mbuf %p already being tracked\n", m);
148 	crit_exit();
149 }
150 
151 static void
152 mbufuntrack(struct mbuf *m)
153 {
154 	struct mbtrack *mbt;
155 
156 	crit_enter();
157 	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
158 	if (mbt == NULL) {
159 		kprintf("mbufuntrack: mbuf %p was not tracked\n", m);
160 	} else {
161 		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
162 		kfree(mbt, M_MTRACK);
163 	}
164 	crit_exit();
165 }
166 
167 void
168 mbuftrackid(struct mbuf *m, int trackid)
169 {
170 	struct mbtrack *mbt;
171 	struct mbuf *n;
172 
173 	crit_enter();
174 	while (m) {
175 		n = m->m_nextpkt;
176 		while (m) {
177 			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
178 			if (mbt)
179 				mbt->trackid = trackid;
180 			m = m->m_next;
181 		}
182 		m = n;
183 	}
184 	crit_exit();
185 }
186 
187 static int
188 mbuftrack_callback(struct mbtrack *mbt, void *arg)
189 {
190 	struct sysctl_req *req = arg;
191 	char buf[64];
192 	int error;
193 
194 	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
195 
196 	error = SYSCTL_OUT(req, buf, strlen(buf));
197 	if (error)
198 		return(-error);
199 	return(0);
200 }
201 
202 static int
203 mbuftrack_show(SYSCTL_HANDLER_ARGS)
204 {
205 	int error;
206 
207 	crit_enter();
208 	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
209 				     mbuftrack_callback, req);
210 	crit_exit();
211 	return (-error);
212 }
213 SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLTYPE_STRING|CTLFLAG_RD,
214 	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
215 
216 #else
217 
218 #define mbuftrack(m)
219 #define mbufuntrack(m)
220 
221 #endif
222 
223 static void mbinit(void *);
224 SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
225 
226 static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];
227 
228 static struct mbstat mbstat[SMP_MAXCPU];
229 int	max_linkhdr;
230 int	max_protohdr;
231 int	max_hdr;
232 int	max_datalen;
233 int	m_defragpackets;
234 int	m_defragbytes;
235 int	m_defraguseless;
236 int	m_defragfailure;
237 #ifdef MBUF_STRESS_TEST
238 int	m_defragrandomfailures;
239 #endif
240 
241 struct objcache *mbuf_cache, *mbufphdr_cache;
242 struct objcache *mclmeta_cache;
243 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
244 
245 int	nmbclusters;
246 int	nmbufs;
247 
248 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
249 	   &max_linkhdr, 0, "");
250 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
251 	   &max_protohdr, 0, "");
252 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
253 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
254 	   &max_datalen, 0, "");
255 static int mbuf_wait = 32;	/* sleep time in ticks; assumed default */
256 SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW, &mbuf_wait, 0, "");
257 static int do_mbstat(SYSCTL_HANDLER_ARGS);
258 
259 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
260 	0, 0, do_mbstat, "S,mbstat", "");
261 
262 static int do_mbtypes(SYSCTL_HANDLER_ARGS);
263 
264 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
265 	0, 0, do_mbtypes, "LU", "");
266 
267 static int
268 do_mbstat(SYSCTL_HANDLER_ARGS)
269 {
270 	struct mbstat mbstat_total;
271 	struct mbstat *mbstat_totalp;
272 	int i;
273 
274 	bzero(&mbstat_total, sizeof(mbstat_total));
275 	mbstat_totalp = &mbstat_total;
276 
277 	for (i = 0; i < ncpus; i++)
278 	{
279 		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
280 		mbstat_total.m_clusters += mbstat[i].m_clusters;
281 		mbstat_total.m_spare += mbstat[i].m_spare;
282 		mbstat_total.m_clfree += mbstat[i].m_clfree;
283 		mbstat_total.m_drops += mbstat[i].m_drops;
284 		mbstat_total.m_wait += mbstat[i].m_wait;
285 		mbstat_total.m_drain += mbstat[i].m_drain;
286 		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
287 		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
288 
289 	}
290 	/*
291 	 * The following fields are not cumulative, so just
292 	 * get their values once.
293 	 */
294 	mbstat_total.m_msize = mbstat[0].m_msize;
295 	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
296 	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
297 	mbstat_total.m_mlen = mbstat[0].m_mlen;
298 	mbstat_total.m_mhlen = mbstat[0].m_mhlen;
299 
300 	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
301 }
302 
303 static int
304 do_mbtypes(SYSCTL_HANDLER_ARGS)
305 {
306 	u_long totals[MT_NTYPES];
307 	int i, j;
308 
309 	for (i = 0; i < MT_NTYPES; i++)
310 		totals[i] = 0;
311 
312 	for (i = 0; i < ncpus; i++)
313 	{
314 		for (j = 0; j < MT_NTYPES; j++)
315 			totals[j] += mbtypes[i][j];
316 	}
317 
318 	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
319 }
320 
321 /*
322  * These are read-only because we do not currently have any code
323  * to adjust the objcache limits after the fact.  The variables
324  * may only be set as boot-time tunables.
325  */
326 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
327 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
328 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
329 	   "Maximum number of mbufs available");
330 
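/*
 * A minimal sketch of setting these boot-time tunables from
 * /boot/loader.conf (the values here are illustrative only, not
 * recommendations from this file):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() below still enforces nmbufs >= nmbclusters * 2.
 */
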
331 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
332 	   &m_defragpackets, 0, "");
333 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
334 	   &m_defragbytes, 0, "");
335 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
336 	   &m_defraguseless, 0, "");
337 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
338 	   &m_defragfailure, 0, "");
339 #ifdef MBUF_STRESS_TEST
340 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
341 	   &m_defragrandomfailures, 0, "");
342 #endif
343 
344 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
345 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
346 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
347 
348 static void m_reclaim (void);
349 static void m_mclref(void *arg);
350 static void m_mclfree(void *arg);
351 
352 #ifndef NMBCLUSTERS
353 #define NMBCLUSTERS	(512 + maxusers * 16)
354 #endif
355 #ifndef NMBUFS
356 #define NMBUFS		(nmbclusters * 2)
357 #endif
358 
359 /*
360  * Perform sanity checks of tunables declared above.
361  */
362 static void
363 tunable_mbinit(void *dummy)
364 {
365 	/*
366 	 * This has to be done before VM init.
367 	 */
368 	nmbclusters = NMBCLUSTERS;
369 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
370 	nmbufs = NMBUFS;
371 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
372 	/* Sanity checks */
373 	if (nmbufs < nmbclusters * 2)
374 		nmbufs = nmbclusters * 2;
375 }
376 SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
377 	tunable_mbinit, NULL);
378 
379 /* "number of clusters of pages" */
380 #define NCL_INIT	1
381 
382 #define NMB_INIT	16
383 
384 /*
385  * The mbuf object cache only guarantees that m_next and m_nextpkt are
386  * NULL and that m_data points to the beginning of the data area.  In
387  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
388  * responsibility of the caller to initialize those fields before use.
389  */
390 
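/*
 * A minimal caller-side sketch of the rule above (assumed usage,
 * compiled out): the length fields must be set before the mbuf is used.
 */
#if 0
	struct mbuf *m = m_gethdr(MB_WAIT, MT_DATA);

	if (m != NULL) {
		m->m_len = 0;		/* not initialized by the cache */
		m->m_pkthdr.len = 0;	/* ditto */
	}
#endif
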
391 static boolean_t __inline
392 mbuf_ctor(void *obj, void *private, int ocflags)
393 {
394 	struct mbuf *m = obj;
395 
396 	m->m_next = NULL;
397 	m->m_nextpkt = NULL;
398 	m->m_data = m->m_dat;
399 	m->m_flags = 0;
400 
401 	return (TRUE);
402 }
403 
404 /*
405  * Initialize the mbuf and the packet header fields.
406  */
407 static boolean_t
408 mbufphdr_ctor(void *obj, void *private, int ocflags)
409 {
410 	struct mbuf *m = obj;
411 
412 	m->m_next = NULL;
413 	m->m_nextpkt = NULL;
414 	m->m_data = m->m_pktdat;
415 	m->m_flags = M_PKTHDR | M_PHCACHE;
416 
417 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
418 	SLIST_INIT(&m->m_pkthdr.tags);
419 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
420 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
421 
422 	return (TRUE);
423 }
424 
425 /*
426  * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
427  */
428 static boolean_t
429 mclmeta_ctor(void *obj, void *private, int ocflags)
430 {
431 	struct mbcluster *cl = obj;
432 	void *buf;
433 
434 	if (ocflags & M_NOWAIT)
435 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
436 	else
437 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
438 	if (buf == NULL)
439 		return (FALSE);
440 	cl->mcl_refs = 0;
441 	cl->mcl_data = buf;
442 	return (TRUE);
443 }
444 
445 static void
446 mclmeta_dtor(void *obj, void *private)
447 {
448 	struct mbcluster *mcl = obj;
449 
450 	KKASSERT(mcl->mcl_refs == 0);
451 	kfree(mcl->mcl_data, M_MBUFCL);
452 }
453 
454 static void
455 linkcluster(struct mbuf *m, struct mbcluster *cl)
456 {
457 	/*
458 	 * Add the cluster to the mbuf.  The caller will detect that the
459 	 * mbuf now has an attached cluster.
460 	 */
461 	m->m_ext.ext_arg = cl;
462 	m->m_ext.ext_buf = cl->mcl_data;
463 	m->m_ext.ext_ref = m_mclref;
464 	m->m_ext.ext_free = m_mclfree;
465 	m->m_ext.ext_size = MCLBYTES;
466 	atomic_add_int(&cl->mcl_refs, 1);
467 
468 	m->m_data = m->m_ext.ext_buf;
469 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
470 }
471 
472 static boolean_t
473 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
474 {
475 	struct mbuf *m = obj;
476 	struct mbcluster *cl;
477 
478 	mbufphdr_ctor(obj, private, ocflags);
479 	cl = objcache_get(mclmeta_cache, ocflags);
480 	if (cl == NULL)
481 		return (FALSE);
482 	m->m_flags |= M_CLCACHE;
483 	linkcluster(m, cl);
484 	return (TRUE);
485 }
486 
487 static boolean_t
488 mbufcluster_ctor(void *obj, void *private, int ocflags)
489 {
490 	struct mbuf *m = obj;
491 	struct mbcluster *cl;
492 
493 	mbuf_ctor(obj, private, ocflags);
494 	cl = objcache_get(mclmeta_cache, ocflags);
495 	if (cl == NULL)
496 		return (FALSE);
497 	m->m_flags |= M_CLCACHE;
498 	linkcluster(m, cl);
499 	return (TRUE);
500 }
501 
502 /*
503  * Used for both the cluster and cluster PHDR caches.
504  *
505  * The mbuf may have lost its cluster due to sharing, so deal
506  * with the situation by checking M_EXT.
507  */
508 static void
509 mbufcluster_dtor(void *obj, void *private)
510 {
511 	struct mbuf *m = obj;
512 	struct mbcluster *mcl;
513 
514 	if (m->m_flags & M_EXT) {
515 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
516 		mcl = m->m_ext.ext_arg;
517 		KKASSERT(mcl->mcl_refs == 1);
518 		mcl->mcl_refs = 0;
519 		objcache_put(mclmeta_cache, mcl);
520 	}
521 }
522 
523 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
524 struct objcache_malloc_args mclmeta_malloc_args =
525 	{ sizeof(struct mbcluster), M_MCLMETA };
526 
527 /* ARGSUSED*/
528 static void
529 mbinit(void *dummy)
530 {
531 	int i;
532 
533 	for (i = 0; i < ncpus; i++)
534 	{
535 		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
536 		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
537 		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
538 		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
539 		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
540 	}
541 
542 	mbuf_cache = objcache_create("mbuf", nmbufs, 0,
543 	    mbuf_ctor, NULL, NULL,
544 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
545 	mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
546 	    mbufphdr_ctor, NULL, NULL,
547 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
548 	mclmeta_cache = objcache_create("cluster mbuf", nmbclusters , 0,
549 	    mclmeta_ctor, mclmeta_dtor, NULL,
550 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
551 	mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
552 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
553 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
554 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
555 	    nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
556 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
557 	return;
558 }
559 
560 /*
561  * Return the number of references to this mbuf's data.  0 is returned
562  * if the mbuf is not M_EXT, a reference count is returned if it is
563  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
564  */
565 int
566 m_sharecount(struct mbuf *m)
567 {
568 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
569 	case 0:
570 		return (0);
571 	case M_EXT:
572 		return (99);
573 	case M_EXT | M_EXT_CLUSTER:
574 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
575 	}
576 	/* NOTREACHED */
577 	return (0);		/* to shut up compiler */
578 }
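
/*
 * Sketch of a typical m_sharecount() check (assumed usage, not from
 * this file): only write to cluster data when we hold the sole
 * reference; the special-M_EXT return of 99 is treated as shared.
 */
#if 0
	if ((m->m_flags & M_EXT) == 0 || m_sharecount(m) == 1) {
		/* safe to modify m's data area in place */
	}
#endif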
579 
580 /*
581  * change mbuf to new type
582  */
583 void
584 m_chtype(struct mbuf *m, int type)
585 {
586 	struct globaldata *gd = mycpu;
587 
588 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
589 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
590 	atomic_set_short_nonlocked(&m->m_type, type);
591 }
592 
593 static void
594 m_reclaim(void)
595 {
596 	struct domain *dp;
597 	struct protosw *pr;
598 
599 	crit_enter();
600 	SLIST_FOREACH(dp, &domains, dom_next) {
601 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
602 			if (pr->pr_drain)
603 				(*pr->pr_drain)();
604 		}
605 	}
606 	crit_exit();
607 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
608 }
609 
610 static void __inline
611 updatestats(struct mbuf *m, int type)
612 {
613 	struct globaldata *gd = mycpu;
614 
615 	m->m_type = type;
616 	mbuftrack(m);
617 
618 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
619 	atomic_add_long_nonlocked(&mbstat[gd->gd_cpuid].m_mbufs, 1);
620 
621 }
622 
623 /*
624  * Allocate an mbuf.
625  */
626 struct mbuf *
627 m_get(int how, int type)
628 {
629 	struct mbuf *m;
630 	int ntries = 0;
631 	int ocf = MBTOM(how);
632 
633 retryonce:
634 
635 	m = objcache_get(mbuf_cache, ocf);
636 
637 	if (m == NULL) {
638 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
639 			struct objcache *reclaimlist[] = {
640 				mbufphdr_cache,
641 				mbufcluster_cache, mbufphdrcluster_cache
642 			};
643 			const int nreclaims = __arysize(reclaimlist);
644 
645 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
646 				m_reclaim();
647 			goto retryonce;
648 		}
649 		return (NULL);
650 	}
651 
652 	updatestats(m, type);
653 	return (m);
654 }
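
/*
 * A minimal allocation sketch (assumed usage): MB_DONTWAIT callers
 * must handle NULL, while MB_TRYWAIT lets m_get() retry once after
 * draining the other caches via m_reclaim().
 */
#if 0
	struct mbuf *m = m_get(MB_DONTWAIT, MT_DATA);

	if (m == NULL)
		return (ENOBUFS);	/* caller must cope with failure */
	m->m_len = 0;
#endif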
655 
656 struct mbuf *
657 m_gethdr(int how, int type)
658 {
659 	struct mbuf *m;
660 	int ocf = MBTOM(how);
661 	int ntries = 0;
662 
663 retryonce:
664 
665 	m = objcache_get(mbufphdr_cache, ocf);
666 
667 	if (m == NULL) {
668 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
669 			struct objcache *reclaimlist[] = {
670 				mbuf_cache,
671 				mbufcluster_cache, mbufphdrcluster_cache
672 			};
673 			const int nreclaims = __arysize(reclaimlist);
674 
675 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
676 				m_reclaim();
677 			goto retryonce;
678 		}
679 		return (NULL);
680 	}
681 
682 	updatestats(m, type);
683 	return (m);
684 }
685 
686 /*
687  * Get an mbuf (not an mbuf cluster!) and zero it.
688  * Deprecated.
689  */
690 struct mbuf *
691 m_getclr(int how, int type)
692 {
693 	struct mbuf *m;
694 
695 	m = m_get(how, type);
696 	if (m != NULL)
697 		bzero(m->m_data, MLEN);
698 	return (m);
699 }
700 
701 /*
702  * Returns an mbuf with an attached cluster.
703  * Because many network drivers use this kind of buffer a lot, it is
704  * convenient to keep a small pool of free buffers of this kind.
705  * Even a small size such as 10 gives about 10% improvement in the
706  * forwarding rate in a bridge or router.
707  */
708 struct mbuf *
709 m_getcl(int how, short type, int flags)
710 {
711 	struct mbuf *m;
712 	int ocflags = MBTOM(how);
713 	int ntries = 0;
714 
715 retryonce:
716 
717 	if (flags & M_PKTHDR)
718 		m = objcache_get(mbufphdrcluster_cache, ocflags);
719 	else
720 		m = objcache_get(mbufcluster_cache, ocflags);
721 
722 	if (m == NULL) {
723 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
724 			struct objcache *reclaimlist[1];
725 
726 			if (flags & M_PKTHDR)
727 				reclaimlist[0] = mbufcluster_cache;
728 			else
729 				reclaimlist[0] = mbufphdrcluster_cache;
730 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
731 				m_reclaim();
732 			goto retryonce;
733 		}
734 		return (NULL);
735 	}
736 
737 	m->m_type = type;
738 
739 	mbuftrack(m);
740 
741 	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
742 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
743 	return (m);
744 }
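
/*
 * Sketch of the common driver receive-buffer pattern alluded to
 * above (assumed usage): one call yields a packet-header mbuf with
 * a 2K cluster already attached.
 */
#if 0
	struct mbuf *m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);

	if (m != NULL)
		m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif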
745 
746 /*
747  * Allocate chain of requested length.
748  */
749 struct mbuf *
750 m_getc(int len, int how, int type)
751 {
752 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
753 	int nsize;
754 
755 	while (len > 0) {
756 		n = m_getl(len, how, type, 0, &nsize);
757 		if (n == NULL)
758 			goto failed;
759 		n->m_len = 0;
760 		*ntail = n;
761 		ntail = &n->m_next;
762 		len -= nsize;
763 	}
764 	return (nfirst);
765 
766 failed:
767 	m_freem(nfirst);
768 	return (NULL);
769 }
770 
771 /*
772  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
773  * and return a pointer to the head of the allocated chain. If m0 is
774  * non-null, then we assume that it is a single mbuf or an mbuf chain to
775  * which we want len bytes worth of mbufs and/or clusters attached, and so
776  * if we succeed in allocating it, we will just return a pointer to m0.
777  *
778  * If we happen to fail at any point during the allocation, we will free
779  * up everything we have already allocated and return NULL.
780  *
781  * Deprecated.  Use m_getc() and m_cat() instead.
782  */
783 struct mbuf *
784 m_getm(struct mbuf *m0, int len, int type, int how)
785 {
786 	struct mbuf *nfirst;
787 
788 	nfirst = m_getc(len, how, type);
789 
790 	if (m0 != NULL) {
791 		m_last(m0)->m_next = nfirst;
792 		return (m0);
793 	}
794 
795 	return (nfirst);
796 }
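
/*
 * The replacement suggested above, as a sketch (m0 and len are
 * placeholders, not from this file): build the extension chain with
 * m_getc() and splice it on with m_cat() instead of the deprecated
 * m_getm().
 */
#if 0
	struct mbuf *tail = m_getc(len, MB_WAIT, MT_DATA);

	if (tail != NULL)
		m_cat(m0, tail);	/* equivalent of m_getm(m0, len, ...) */
#endif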
797 
798 /*
799  * Add a cluster to a normal mbuf; M_EXT is set on success.
800  * Deprecated.  Use m_getcl() instead.
801  */
802 void
803 m_mclget(struct mbuf *m, int how)
804 {
805 	struct mbcluster *mcl;
806 
807 	KKASSERT((m->m_flags & M_EXT) == 0);
808 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
809 	if (mcl != NULL) {
810 		linkcluster(m, mcl);
811 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
812 	}
813 }
814 
815 /*
816  * Updates to mbcluster must be MPSAFE.  Only an entity which already has
817  * a reference to the cluster can ref it, so we are in no danger of
818  * racing an add with a subtract.  But the operation must still be atomic
819  * since multiple entities may have a reference on the cluster.
820  *
821  * m_mclfree() is almost the same but it must contend with two entities
822  * freeing the cluster at the same time.  If there is only one reference
823  * count we are the only entity referencing the cluster and no further
824  * locking is required.  Otherwise we must protect against a race to 0
825  * with the serializer.
826  */
827 static void
828 m_mclref(void *arg)
829 {
830 	struct mbcluster *mcl = arg;
831 
832 	atomic_add_int(&mcl->mcl_refs, 1);
833 }
834 
835 /*
836  * When dereferencing a cluster we have to deal with an N->0 race, where
837  * N entities free their references simultaneously.  To do this we use
838  * atomic_cmpset_int().
839  */
840 static void
841 m_mclfree(void *arg)
842 {
843 	struct mbcluster *mcl = arg;
844 	int refs;
845 
846 	do {
847 		refs = mcl->mcl_refs;
848 	} while (atomic_cmpset_int(&mcl->mcl_refs, refs, refs - 1) == 0);
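	/*
	 * refs holds the pre-decrement count: if it was 1, ours was the
	 * last reference and the cluster can go back to its cache.
	 */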
849 	if (refs == 1)
850 		objcache_put(mclmeta_cache, mcl);
851 }
852 
853 extern void db_print_backtrace(void);
854 
855 /*
856  * Free a single mbuf and any associated external storage.  The successor,
857  * if any, is returned.
858  *
859  * We do need to check non-first mbufs for m_aux, since some existing
860  * code does not call M_PREPEND properly.
861  * (Example: calls to bpf_mtap from drivers.)
862  */
863 struct mbuf *
864 m_free(struct mbuf *m)
865 {
866 	struct mbuf *n;
867 	struct globaldata *gd = mycpu;
868 
869 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
870 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
871 
872 	n = m->m_next;
873 
874 	/*
875 	 * Make sure the mbuf is in its constructed state before returning it
876 	 * to the objcache.
877 	 */
878 	m->m_next = NULL;
879 	mbufuntrack(m);
880 #ifdef notyet
881 	KKASSERT(m->m_nextpkt == NULL);
882 #else
883 	if (m->m_nextpkt != NULL) {
884 #ifdef DDB
885 		static int afewtimes = 10;
886 
887 		if (afewtimes-- > 0) {
888 			kprintf("mfree: m->m_nextpkt != NULL\n");
889 			db_print_backtrace();
890 		}
891 #endif
892 		m->m_nextpkt = NULL;
893 	}
894 #endif
895 	if (m->m_flags & M_PKTHDR) {
896 		m_tag_delete_chain(m);		/* eliminate XXX JH */
897 	}
898 
899 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
900 
901 	/*
902 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
903 	 * cache.  This is based on the PHCACHE flag which tells us whether
904 	 * the mbuf was originally allocated out of a packet-header cache
905 	 * or a non-packet-header cache.
906 	 */
907 	if (m->m_flags & M_PHCACHE) {
908 		m->m_flags |= M_PKTHDR;
909 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
910 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
911 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
912 		SLIST_INIT(&m->m_pkthdr.tags);
913 	}
914 
915 	/*
916 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
917 	 * the mbuf was originally allocated from a cluster cache or not,
918 	 * and is totally separate from whether the mbuf is currently
919 	 * associated with a cluster.
920 	 */
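	/*
	 * In summary (a sketch of the cases below):
	 *   M_CLCACHE|M_EXT|M_EXT_CLUSTER -> mbuf(phdr)cluster_cache,
	 *                                    if the cluster is unshared
	 *   M_EXT|M_EXT_CLUSTER, M_EXT    -> drop the cluster, then
	 *   0                             -> mbuf(phdr)_cache
	 */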
921 	crit_enter();
922 	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
923 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
924 		/*
925 		 * mbuf+cluster cache case.  The mbuf was allocated from the
926 		 * combined mbuf_cluster cache and can be returned to the
927 		 * cache if the cluster hasn't been shared.
928 		 */
929 		if (m_sharecount(m) == 1) {
930 			/*
931 			 * The cluster has not been shared, we can just
932 			 * reset the data pointer and return the mbuf
933 			 * to the cluster cache.  Note that the reference
934 			 * count is left intact (it is still associated with
935 			 * an mbuf).
936 			 */
937 			m->m_data = m->m_ext.ext_buf;
938 			if (m->m_flags & M_PHCACHE)
939 				objcache_put(mbufphdrcluster_cache, m);
940 			else
941 				objcache_put(mbufcluster_cache, m);
942 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
943 		} else {
944 			/*
945 			 * Hell.  Someone else has a ref on this cluster,
946 			 * we have to disconnect it which means we can't
947 			 * put it back into the mbufcluster_cache, we
948 			 * have to destroy the mbuf.
949 			 *
950 			 * Other mbuf references to the cluster will typically
951 			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
952 			 *
953 			 * XXX we could try to connect another cluster to
954 			 * it.
955 			 */
956 			m->m_ext.ext_free(m->m_ext.ext_arg);
957 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
958 			if (m->m_flags & M_PHCACHE)
959 				objcache_dtor(mbufphdrcluster_cache, m);
960 			else
961 				objcache_dtor(mbufcluster_cache, m);
962 		}
963 		break;
964 	case M_EXT | M_EXT_CLUSTER:
965 		/*
966 		 * Normal cluster associated with an mbuf that was allocated
967 		 * from the normal mbuf pool rather than the cluster pool.
968 		 * The cluster has to be independently disassociated from the
969 		 * mbuf.
970 		 */
971 		if (m_sharecount(m) == 1)
972 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
973 		/* fall through */
974 	case M_EXT:
975 		/*
976 		 * Normal cluster association case, disconnect the cluster from
977 		 * the mbuf.  The cluster may or may not be custom.
978 		 */
979 		m->m_ext.ext_free(m->m_ext.ext_arg);
980 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
981 		/* fall through */
982 	case 0:
983 		/*
984 		 * return the mbuf to the mbuf cache.
985 		 */
986 		if (m->m_flags & M_PHCACHE) {
987 			m->m_data = m->m_pktdat;
988 			objcache_put(mbufphdr_cache, m);
989 		} else {
990 			m->m_data = m->m_dat;
991 			objcache_put(mbuf_cache, m);
992 		}
993 		atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
994 		break;
995 	default:
996 		if (!panicstr)
997 			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
998 		break;
999 	}
1000 	crit_exit();
1001 	return (n);
1002 }
1003 
1004 void
1005 m_freem(struct mbuf *m)
1006 {
1007 	crit_enter();
1008 	while (m)
1009 		m = m_free(m);
1010 	crit_exit();
1011 }
1012 
1013 /*
1014  * mbuf utility routines
1015  */
1016 
1017 /*
1018  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1019  * copy junk along.
1020  */
1021 struct mbuf *
1022 m_prepend(struct mbuf *m, int len, int how)
1023 {
1024 	struct mbuf *mn;
1025 
1026 	if (m->m_flags & M_PKTHDR)
1027 	    mn = m_gethdr(how, m->m_type);
1028 	else
1029 	    mn = m_get(how, m->m_type);
1030 	if (mn == NULL) {
1031 		m_freem(m);
1032 		return (NULL);
1033 	}
1034 	if (m->m_flags & M_PKTHDR)
1035 		M_MOVE_PKTHDR(mn, m);
1036 	mn->m_next = m;
1037 	m = mn;
1038 	if (len < MHLEN)
1039 		MH_ALIGN(m, len);
1040 	m->m_len = len;
1041 	return (m);
1042 }
1043 
1044 /*
1045  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1046  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
1047  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from the caller.
1048  * Note that the copy is read-only, because clusters are not copied,
1049  * only their reference counts are incremented.
1050  */
1051 struct mbuf *
1052 m_copym(const struct mbuf *m, int off0, int len, int wait)
1053 {
1054 	struct mbuf *n, **np;
1055 	int off = off0;
1056 	struct mbuf *top;
1057 	int copyhdr = 0;
1058 
1059 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
1060 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
1061 	if (off == 0 && m->m_flags & M_PKTHDR)
1062 		copyhdr = 1;
1063 	while (off > 0) {
1064 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1065 		if (off < m->m_len)
1066 			break;
1067 		off -= m->m_len;
1068 		m = m->m_next;
1069 	}
1070 	np = &top;
1071 	top = NULL;
1072 	while (len > 0) {
1073 		if (m == NULL) {
1074 			KASSERT(len == M_COPYALL,
1075 			    ("m_copym, length > size of mbuf chain"));
1076 			break;
1077 		}
1078 		/*
1079 		 * Because we are sharing any cluster attachment below,
1080 		 * be sure to get an mbuf that does not have a cluster
1081 		 * associated with it.
1082 		 */
1083 		if (copyhdr)
1084 			n = m_gethdr(wait, m->m_type);
1085 		else
1086 			n = m_get(wait, m->m_type);
1087 		*np = n;
1088 		if (n == NULL)
1089 			goto nospace;
1090 		if (copyhdr) {
1091 			if (!m_dup_pkthdr(n, m, wait))
1092 				goto nospace;
1093 			if (len == M_COPYALL)
1094 				n->m_pkthdr.len -= off0;
1095 			else
1096 				n->m_pkthdr.len = len;
1097 			copyhdr = 0;
1098 		}
1099 		n->m_len = min(len, m->m_len - off);
1100 		if (m->m_flags & M_EXT) {
1101 			KKASSERT((n->m_flags & M_EXT) == 0);
1102 			n->m_data = m->m_data + off;
1103 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1104 			n->m_ext = m->m_ext;
1105 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1106 		} else {
1107 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1108 			    (unsigned)n->m_len);
1109 		}
1110 		if (len != M_COPYALL)
1111 			len -= n->m_len;
1112 		off = 0;
1113 		m = m->m_next;
1114 		np = &n->m_next;
1115 	}
1116 	if (top == NULL)
1117 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1118 	return (top);
1119 nospace:
1120 	m_freem(top);
1121 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1122 	return (NULL);
1123 }
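
/*
 * Sketch of the read-only caveat above (assumed usage): a cluster
 * copied by m_copym() is shared with the original, so a writer
 * should take a deep copy of a packet-header chain with m_dup()
 * instead.
 */
#if 0
	struct mbuf *ro = m_copym(m, 0, M_COPYALL, MB_DONTWAIT); /* shared */
	struct mbuf *rw = m_dup(m, MB_DONTWAIT);		 /* writable */
#endif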
1124 
1125 /*
1126  * Copy an entire packet, including header (which must be present).
1127  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1128  * Note that the copy is read-only, because clusters are not copied,
1129  * only their reference counts are incremented.
1130  * Preserve alignment of the first mbuf so if the creator has left
1131  * some room at the beginning (e.g. for inserting protocol headers)
1132  * the copies also have the room available.
1133  */
1134 struct mbuf *
1135 m_copypacket(struct mbuf *m, int how)
1136 {
1137 	struct mbuf *top, *n, *o;
1138 
1139 	n = m_gethdr(how, m->m_type);
1140 	top = n;
1141 	if (!n)
1142 		goto nospace;
1143 
1144 	if (!m_dup_pkthdr(n, m, how))
1145 		goto nospace;
1146 	n->m_len = m->m_len;
1147 	if (m->m_flags & M_EXT) {
1148 		KKASSERT((n->m_flags & M_EXT) == 0);
1149 		n->m_data = m->m_data;
1150 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1151 		n->m_ext = m->m_ext;
1152 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1153 	} else {
1154 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
1155 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1156 	}
1157 
1158 	m = m->m_next;
1159 	while (m) {
1160 		o = m_get(how, m->m_type);
1161 		if (!o)
1162 			goto nospace;
1163 
1164 		n->m_next = o;
1165 		n = n->m_next;
1166 
1167 		n->m_len = m->m_len;
1168 		if (m->m_flags & M_EXT) {
1169 			KKASSERT((n->m_flags & M_EXT) == 0);
1170 			n->m_data = m->m_data;
1171 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1172 			n->m_ext = m->m_ext;
1173 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1174 		} else {
1175 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1176 		}
1177 
1178 		m = m->m_next;
1179 	}
1180 	return top;
1181 nospace:
1182 	m_freem(top);
1183 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1184 	return (NULL);
1185 }
1186 
1187 /*
1188  * Copy data from an mbuf chain starting "off" bytes from the beginning,
1189  * continuing for "len" bytes, into the indicated buffer.
1190  */
1191 void
1192 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1193 {
1194 	unsigned count;
1195 
1196 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1197 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1198 	while (off > 0) {
1199 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1200 		if (off < m->m_len)
1201 			break;
1202 		off -= m->m_len;
1203 		m = m->m_next;
1204 	}
1205 	while (len > 0) {
1206 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1207 		count = min(m->m_len - off, len);
1208 		bcopy(mtod(m, caddr_t) + off, cp, count);
1209 		len -= count;
1210 		cp += count;
1211 		off = 0;
1212 		m = m->m_next;
1213 	}
1214 }
1215 
1216 /*
1217  * Copy a packet header mbuf chain into a completely new chain, including
1218  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1219  * you need a writable copy of an mbuf chain.
1220  */
1221 struct mbuf *
1222 m_dup(struct mbuf *m, int how)
1223 {
1224 	struct mbuf **p, *top = NULL;
1225 	int remain, moff, nsize;
1226 
1227 	/* Sanity check */
1228 	if (m == NULL)
1229 		return (NULL);
1230 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1231 
1232 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1233 	remain = m->m_pkthdr.len;
1234 	moff = 0;
1235 	p = &top;
1236 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1237 		struct mbuf *n;
1238 
1239 		/* Get the next new mbuf */
1240 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1241 			   &nsize);
1242 		if (n == NULL)
1243 			goto nospace;
1244 		if (top == NULL)
1245 			if (!m_dup_pkthdr(n, m, how))
1246 				goto nospace0;
1247 
1248 		/* Link it into the new chain */
1249 		*p = n;
1250 		p = &n->m_next;
1251 
1252 		/* Copy data from original mbuf(s) into new mbuf */
1253 		n->m_len = 0;
1254 		while (n->m_len < nsize && m != NULL) {
1255 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1256 
1257 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1258 			moff += chunk;
1259 			n->m_len += chunk;
1260 			remain -= chunk;
1261 			if (moff == m->m_len) {
1262 				m = m->m_next;
1263 				moff = 0;
1264 			}
1265 		}
1266 
1267 		/* Check correct total mbuf length */
1268 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1269 			("%s: bogus m_pkthdr.len", __func__));
1270 	}
1271 	return (top);
1272 
1273 nospace:
1274 	m_freem(top);
1275 nospace0:
1276 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1277 	return (NULL);
1278 }
1279 
1280 /*
1281  * Concatenate mbuf chain n to m.
1282  * Both chains must be of the same type (e.g. MT_DATA).
1283  * Any m_pkthdr is not updated.
1284  */
1285 void
1286 m_cat(struct mbuf *m, struct mbuf *n)
1287 {
1288 	m = m_last(m);
1289 	while (n) {
1290 		if (m->m_flags & M_EXT ||
1291 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1292 			/* just join the two chains */
1293 			m->m_next = n;
1294 			return;
1295 		}
1296 		/* splat the data from one into the other */
1297 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1298 		    (u_int)n->m_len);
1299 		m->m_len += n->m_len;
1300 		n = m_free(n);
1301 	}
1302 }
1303 
1304 void
1305 m_adj(struct mbuf *mp, int req_len)
1306 {
1307 	int len = req_len;
1308 	struct mbuf *m;
1309 	int count;
1310 
1311 	if ((m = mp) == NULL)
1312 		return;
1313 	if (len >= 0) {
1314 		/*
1315 		 * Trim from head.
1316 		 */
1317 		while (m != NULL && len > 0) {
1318 			if (m->m_len <= len) {
1319 				len -= m->m_len;
1320 				m->m_len = 0;
1321 				m = m->m_next;
1322 			} else {
1323 				m->m_len -= len;
1324 				m->m_data += len;
1325 				len = 0;
1326 			}
1327 		}
1328 		m = mp;
1329 		if (mp->m_flags & M_PKTHDR)
1330 			m->m_pkthdr.len -= (req_len - len);
1331 	} else {
1332 		/*
1333 		 * Trim from tail.  Scan the mbuf chain,
1334 		 * calculating its length and finding the last mbuf.
1335 		 * If the adjustment only affects this mbuf, then just
1336 		 * adjust and return.  Otherwise, rescan and truncate
1337 		 * after the remaining size.
1338 		 */
1339 		len = -len;
1340 		count = 0;
1341 		for (;;) {
1342 			count += m->m_len;
1343 		if (m->m_next == NULL)
1344 				break;
1345 			m = m->m_next;
1346 		}
1347 		if (m->m_len >= len) {
1348 			m->m_len -= len;
1349 			if (mp->m_flags & M_PKTHDR)
1350 				mp->m_pkthdr.len -= len;
1351 			return;
1352 		}
1353 		count -= len;
1354 		if (count < 0)
1355 			count = 0;
1356 		/*
1357 		 * Correct length for chain is "count".
1358 		 * Find the mbuf with last data, adjust its length,
1359 		 * and toss data from remaining mbufs on chain.
1360 		 */
1361 		m = mp;
1362 		if (m->m_flags & M_PKTHDR)
1363 			m->m_pkthdr.len = count;
1364 		for (; m; m = m->m_next) {
1365 			if (m->m_len >= count) {
1366 				m->m_len = count;
1367 				break;
1368 			}
1369 			count -= m->m_len;
1370 		}
1371 		while (m->m_next)
1372 			(m = m->m_next)->m_len = 0;
1373 	}
1374 }
1375 
1376 /*
1377  * Rearrange an mbuf chain so that len bytes are contiguous
1378  * and in the data area of an mbuf (so that mtod will work for a structure
1379  * of size len).  Returns the resulting mbuf chain on success, frees it and
1380  * returns NULL on failure.  If there is room, it will add up to
1381  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1382  * avoid being called next time.
1383  */
1384 struct mbuf *
1385 m_pullup(struct mbuf *n, int len)
1386 {
1387 	struct mbuf *m;
1388 	int count;
1389 	int space;
1390 
1391 	/*
1392 	 * If first mbuf has no cluster, and has room for len bytes
1393 	 * without shifting current data, pullup into it,
1394 	 * otherwise allocate a new mbuf to prepend to the chain.
1395 	 */
1396 	if (!(n->m_flags & M_EXT) &&
1397 	    n->m_data + len < &n->m_dat[MLEN] &&
1398 	    n->m_next) {
1399 		if (n->m_len >= len)
1400 			return (n);
1401 		m = n;
1402 		n = n->m_next;
1403 		len -= m->m_len;
1404 	} else {
1405 		if (len > MHLEN)
1406 			goto bad;
1407 		if (n->m_flags & M_PKTHDR)
1408 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1409 		else
1410 			m = m_get(MB_DONTWAIT, n->m_type);
1411 		if (m == NULL)
1412 			goto bad;
1413 		m->m_len = 0;
1414 		if (n->m_flags & M_PKTHDR)
1415 			M_MOVE_PKTHDR(m, n);
1416 	}
1417 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1418 	do {
1419 		count = min(min(max(len, max_protohdr), space), n->m_len);
1420 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1421 		  (unsigned)count);
1422 		len -= count;
1423 		m->m_len += count;
1424 		n->m_len -= count;
1425 		space -= count;
1426 		if (n->m_len)
1427 			n->m_data += count;
1428 		else
1429 			n = m_free(n);
1430 	} while (len > 0 && n);
1431 	if (len > 0) {
1432 		m_free(m);
1433 		goto bad;
1434 	}
1435 	m->m_next = n;
1436 	return (m);
1437 bad:
1438 	m_freem(n);
1439 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1440 	return (NULL);
1441 }
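
/*
 * The classic m_pullup() idiom, as a sketch (struct ip and the
 * surrounding variables are assumptions, not from this file): make
 * a protocol header contiguous before dereferencing it via mtod().
 */
#if 0
	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;			/* m_pullup() freed the chain */
	ip = mtod(m, struct ip *);
#endif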
1442 
1443 /*
1444  * Partition an mbuf chain in two pieces, returning the tail --
1445  * all but the first len0 bytes.  In case of failure, it returns NULL and
1446  * attempts to restore the chain to its original state.
1447  *
1448  * Note that the resulting mbufs might be read-only, because the new
1449  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1450  * the "breaking point" happens to lie within a cluster mbuf. Use the
1451  * M_WRITABLE() macro to check for this case.
1452  */
1453 struct mbuf *
1454 m_split(struct mbuf *m0, int len0, int wait)
1455 {
1456 	struct mbuf *m, *n;
1457 	unsigned len = len0, remain;
1458 
1459 	for (m = m0; m && len > m->m_len; m = m->m_next)
1460 		len -= m->m_len;
1461 	if (m == NULL)
1462 		return (NULL);
1463 	remain = m->m_len - len;
1464 	if (m0->m_flags & M_PKTHDR) {
1465 		n = m_gethdr(wait, m0->m_type);
1466 		if (n == NULL)
1467 			return (NULL);
1468 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1469 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1470 		m0->m_pkthdr.len = len0;
1471 		if (m->m_flags & M_EXT)
1472 			goto extpacket;
1473 		if (remain > MHLEN) {
1474 			/* m can't be the lead packet */
1475 			MH_ALIGN(n, 0);
1476 			n->m_next = m_split(m, len, wait);
1477 			if (n->m_next == NULL) {
1478 				m_free(n);
1479 				return (NULL);
1480 			} else {
1481 				n->m_len = 0;
1482 				return (n);
1483 			}
1484 		} else
1485 			MH_ALIGN(n, remain);
1486 	} else if (remain == 0) {
1487 		n = m->m_next;
1488 		m->m_next = NULL;
1489 		return (n);
1490 	} else {
1491 		n = m_get(wait, m->m_type);
1492 		if (n == NULL)
1493 			return (NULL);
1494 		M_ALIGN(n, remain);
1495 	}
1496 extpacket:
1497 	if (m->m_flags & M_EXT) {
1498 		KKASSERT((n->m_flags & M_EXT) == 0);
1499 		n->m_data = m->m_data + len;
1500 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1501 		n->m_ext = m->m_ext;
1502 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1503 	} else {
1504 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1505 	}
1506 	n->m_len = remain;
1507 	m->m_len = len;
1508 	n->m_next = m->m_next;
1509 	m->m_next = NULL;
1510 	return (n);
1511 }
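
/*
 * A minimal split sketch (hdrlen and m0 are placeholders): detach
 * everything past the first hdrlen bytes; per the note above, the
 * tail may share a cluster with m0 and thus be read-only.
 */
#if 0
	struct mbuf *tail = m_split(m0, hdrlen, MB_DONTWAIT);

	if (tail == NULL)
		return (ENOBUFS);	/* m0 is left intact on failure */
#endif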
1512 
1513 /*
1514  * Routine to copy from device local memory into mbufs.
1515  * Note: "offset" is ill-defined and always called as 0, so ignore it.
1516  */
1517 struct mbuf *
1518 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1519     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1520 {
1521 	struct mbuf *m, *mfirst = NULL, **mtail;
1522 	int nsize, flags;
1523 
1524 	if (copy == NULL)
1525 		copy = bcopy;
1526 	mtail = &mfirst;
1527 	flags = M_PKTHDR;
1528 
1529 	while (len > 0) {
1530 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1531 		if (m == NULL) {
1532 			m_freem(mfirst);
1533 			return (NULL);
1534 		}
1535 		m->m_len = min(len, nsize);
1536 
1537 		if (flags & M_PKTHDR) {
1538 			if (len + max_linkhdr <= nsize)
1539 				m->m_data += max_linkhdr;
1540 			m->m_pkthdr.rcvif = ifp;
1541 			m->m_pkthdr.len = len;
1542 			flags = 0;
1543 		}
1544 
1545 		copy(buf, m->m_data, (unsigned)m->m_len);
1546 		buf += m->m_len;
1547 		len -= m->m_len;
1548 		*mtail = m;
1549 		mtail = &m->m_next;
1550 	}
1551 
1552 	return (mfirst);
1553 }
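
/*
 * Sketch of a driver receive path using m_devget() (sc_rxbuf and the
 * receive hand-off are assumptions, not from this file): copy len
 * bytes of board memory into a fresh chain with the default bcopy.
 */
#if 0
	struct mbuf *m = m_devget(sc_rxbuf, len, 0, ifp, NULL);

	if (m != NULL)
		ifp->if_input(ifp, m);
#endif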
1554 
1555 /*
1556  * Copy data from a buffer back into the indicated mbuf chain,
1557  * starting "off" bytes from the beginning, extending the mbuf
1558  * chain if necessary.
1559  */
1560 void
1561 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1562 {
1563 	int mlen;
1564 	struct mbuf *m = m0, *n;
1565 	int totlen = 0;
1566 
1567 	if (m0 == NULL)
1568 		return;
1569 	while (off > (mlen = m->m_len)) {
1570 		off -= mlen;
1571 		totlen += mlen;
1572 		if (m->m_next == NULL) {
1573 			n = m_getclr(MB_DONTWAIT, m->m_type);
1574 			if (n == NULL)
1575 				goto out;
1576 			n->m_len = min(MLEN, len + off);
1577 			m->m_next = n;
1578 		}
1579 		m = m->m_next;
1580 	}
1581 	while (len > 0) {
1582 		mlen = min(m->m_len - off, len);
1583 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1584 		cp += mlen;
1585 		len -= mlen;
1586 		mlen += off;
1587 		off = 0;
1588 		totlen += mlen;
1589 		if (len == 0)
1590 			break;
1591 		if (m->m_next == NULL) {
1592 			n = m_get(MB_DONTWAIT, m->m_type);
1593 			if (n == NULL)
1594 				break;
1595 			n->m_len = min(MLEN, len);
1596 			m->m_next = n;
1597 		}
1598 		m = m->m_next;
1599 	}
1600 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1601 		m->m_pkthdr.len = totlen;
1602 }
1603 
1604 void
1605 m_print(const struct mbuf *m)
1606 {
1607 	int len;
1608 	const struct mbuf *m2;
1609 
1610 	len = m->m_pkthdr.len;
1611 	m2 = m;
1612 	while (len) {
1613 		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1614 		len -= m2->m_len;
1615 		m2 = m2->m_next;
1616 	}
1617 	return;
1618 }
1619 
1620 /*
1621  * "Move" mbuf pkthdr from "from" to "to".
1622  * "from" must have M_PKTHDR set, and "to" must be empty.
1623  */
1624 void
1625 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1626 {
1627 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1628 
1629 	to->m_flags |= from->m_flags & M_COPYFLAGS;
1630 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
1631 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
1632 }
1633 
1634 /*
1635  * Duplicate "from"'s mbuf pkthdr in "to".
1636  * "from" must have M_PKTHDR set, and "to" must be empty.
1637  * In particular, this does a deep copy of the packet tags.
1638  */
1639 int
1640 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1641 {
1642 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1643 
1644 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
1645 		      (to->m_flags & ~M_COPYFLAGS);
1646 	to->m_pkthdr = from->m_pkthdr;
1647 	SLIST_INIT(&to->m_pkthdr.tags);
1648 	return (m_tag_copy_chain(to, from, how));
1649 }
1650 
1651 /*
1652  * Defragment an mbuf chain, returning the shortest possible
1653  * chain of mbufs and clusters.  If allocation fails and
1654  * this cannot be completed, NULL will be returned, but
1655  * the passed in chain will be unchanged.  Upon success,
1656  * the original chain will be freed, and the new chain
1657  * will be returned.
1658  *
1659  * If a non-packet-header mbuf is passed in, the original
1660  * mbuf (or chain) will be returned unharmed.
1661  *
1662  * m_defrag_nofree doesn't free the passed in mbuf.
1663  */
1664 struct mbuf *
1665 m_defrag(struct mbuf *m0, int how)
1666 {
1667 	struct mbuf *m_new;
1668 
1669 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
1670 		return (NULL);
1671 	if (m_new != m0)
1672 		m_freem(m0);
1673 	return (m_new);
1674 }
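
/*
 * The usual m_defrag() consumer, as a sketch (the DMA-map step and
 * the error/m variables are assumptions, not from this file): when a
 * transmit map runs out of segments (EFBIG), compact the chain and
 * retry.
 */
#if 0
	if (error == EFBIG) {
		struct mbuf *m_new = m_defrag(m, MB_DONTWAIT);

		if (m_new == NULL)
			return (ENOBUFS);
		m = m_new;		/* original chain was freed */
	}
#endif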
1675 
1676 struct mbuf *
1677 m_defrag_nofree(struct mbuf *m0, int how)
1678 {
1679 	struct mbuf	*m_new = NULL, *m_final = NULL;
1680 	int		progress = 0, length, nsize;
1681 
1682 	if (!(m0->m_flags & M_PKTHDR))
1683 		return (m0);
1684 
1685 #ifdef MBUF_STRESS_TEST
1686 	if (m_defragrandomfailures) {
1687 		int temp = karc4random() & 0xff;
1688 		if (temp == 0xba)
1689 			goto nospace;
1690 	}
1691 #endif
1692 
1693 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
1694 	if (m_final == NULL)
1695 		goto nospace;
1696 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
1697 
1698 	if (!m_dup_pkthdr(m_final, m0, how))
1699 		goto nospace;
1700 
1701 	m_new = m_final;
1702 
1703 	while (progress < m0->m_pkthdr.len) {
1704 		length = m0->m_pkthdr.len - progress;
1705 		if (length > MCLBYTES)
1706 			length = MCLBYTES;
1707 
1708 		if (m_new == NULL) {
1709 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
1710 			if (m_new == NULL)
1711 				goto nospace;
1712 		}
1713 
1714 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1715 		progress += length;
1716 		m_new->m_len = length;
1717 		if (m_new != m_final)
1718 			m_cat(m_final, m_new);
1719 		m_new = NULL;
1720 	}
1721 	if (m0->m_next == NULL)
1722 		m_defraguseless++;
1723 	m_defragpackets++;
1724 	m_defragbytes += m_final->m_pkthdr.len;
1725 	return (m_final);
1726 nospace:
1727 	m_defragfailure++;
1728 	if (m_new)
1729 		m_free(m_new);
1730 	m_freem(m_final);
1731 	return (NULL);
1732 }
1733 
1734 /*
1735  * Move data from uio into mbufs.
1736  */
1737 struct mbuf *
1738 m_uiomove(struct uio *uio)
1739 {
1740 	struct mbuf *m;			/* current working mbuf */
1741 	struct mbuf *head = NULL;	/* result mbuf chain */
1742 	struct mbuf **mp = &head;
1743 	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;
1744 
1745 	do {
1746 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
1747 		if (flags) {
1748 			m->m_pkthdr.len = 0;
1749 			/* Leave room for protocol headers. */
1750 			if (resid < MHLEN)
1751 				MH_ALIGN(m, resid);
1752 			flags = 0;
1753 		}
1754 		m->m_len = min(nsize, resid);
1755 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
1756 		if (error) {
1757 			m_free(m);
1758 			goto failed;
1759 		}
1760 		*mp = m;
1761 		mp = &m->m_next;
1762 		head->m_pkthdr.len += m->m_len;
1763 		resid -= m->m_len;
1764 	} while (resid > 0);
1765 
1766 	return (head);
1767 
1768 failed:
1769 	m_freem(head);
1770 	return (NULL);
1771 }
1772 
1773 struct mbuf *
1774 m_last(struct mbuf *m)
1775 {
1776 	while (m->m_next)
1777 		m = m->m_next;
1778 	return (m);
1779 }
1780 
1781 /*
1782  * Return the number of bytes in an mbuf chain.
1783  * If lastm is not NULL, also return the last mbuf.
1784  */
1785 u_int
1786 m_lengthm(struct mbuf *m, struct mbuf **lastm)
1787 {
1788 	u_int len = 0;
1789 	struct mbuf *prev = m;
1790 
1791 	while (m) {
1792 		len += m->m_len;
1793 		prev = m;
1794 		m = m->m_next;
1795 	}
1796 	if (lastm != NULL)
1797 		*lastm = prev;
1798 	return (len);
1799 }
1800 
1801 /*
1802  * Like m_lengthm(), except also keep track of mbuf usage.
1803  */
1804 u_int
1805 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
1806 {
1807 	u_int len = 0, mbcnt = 0;
1808 	struct mbuf *prev = m;
1809 
1810 	while (m) {
1811 		len += m->m_len;
1812 		mbcnt += MSIZE;
1813 		if (m->m_flags & M_EXT)
1814 			mbcnt += m->m_ext.ext_size;
1815 		prev = m;
1816 		m = m->m_next;
1817 	}
1818 	if (lastm != NULL)
1819 		*lastm = prev;
1820 	*pmbcnt = mbcnt;
1821 	return (len);
1822 }
1823