1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
5  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Jeffrey M. Hsu.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1982, 1986, 1988, 1991, 1993
38  *	The Regents of the University of California.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by the University of
51  *	California, Berkeley and its contributors.
52  * 4. Neither the name of the University nor the names of its contributors
53  *    may be used to endorse or promote products derived from this software
54  *    without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66  * SUCH DAMAGE.
67  *
68  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
69  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
70  * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
71  */
72 
73 #include "opt_param.h"
74 #include "opt_mbuf_stress_test.h"
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/file.h>
78 #include <sys/malloc.h>
79 #include <sys/mbuf.h>
80 #include <sys/kernel.h>
81 #include <sys/sysctl.h>
82 #include <sys/domain.h>
83 #include <sys/objcache.h>
84 #include <sys/tree.h>
85 #include <sys/protosw.h>
86 #include <sys/uio.h>
87 #include <sys/thread.h>
88 #include <sys/globaldata.h>
89 
90 #include <sys/thread2.h>
91 #include <sys/spinlock2.h>
92 
93 #include <machine/atomic.h>
94 #include <machine/limits.h>
95 
96 #include <vm/vm.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 
100 #ifdef INVARIANTS
101 #include <machine/cpu.h>
102 #endif
103 
104 /*
105  * mbuf cluster meta-data
106  */
107 struct mbcluster {
108 	int32_t	mcl_refs;
109 	void	*mcl_data;
110 };
111 
112 /*
113  * mbuf tracking for debugging purposes
114  */
115 #ifdef MBUF_DEBUG
116 
117 static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
118 
119 struct mbtrack;
120 RB_HEAD(mbuf_rb_tree, mbtrack);
121 RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
122 
123 struct mbtrack {
124 	RB_ENTRY(mbtrack) rb_node;
125 	int trackid;
126 	struct mbuf *m;
127 };
128 
129 static int
130 mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
131 {
132 	if (mb1->m < mb2->m)
133 		return(-1);
134 	if (mb1->m > mb2->m)
135 		return(1);
136 	return(0);
137 }
138 
139 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
140 
141 struct mbuf_rb_tree	mbuf_track_root;
142 static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
143 
144 static void
145 mbuftrack(struct mbuf *m)
146 {
147 	struct mbtrack *mbt;
148 
149 	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
150 	spin_lock(&mbuf_track_spin);
151 	mbt->m = m;
152 	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
153 		spin_unlock(&mbuf_track_spin);
154 		panic("mbuftrack: mbuf %p already being tracked\n", m);
155 	}
156 	spin_unlock(&mbuf_track_spin);
157 }
158 
159 static void
160 mbufuntrack(struct mbuf *m)
161 {
162 	struct mbtrack *mbt;
163 
164 	spin_lock(&mbuf_track_spin);
165 	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
166 	if (mbt == NULL) {
167 		spin_unlock(&mbuf_track_spin);
168 		panic("mbufuntrack: mbuf %p was not tracked\n", m);
169 	} else {
170 		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
171 		spin_unlock(&mbuf_track_spin);
172 		kfree(mbt, M_MTRACK);
173 	}
174 }
175 
176 void
177 mbuftrackid(struct mbuf *m, int trackid)
178 {
179 	struct mbtrack *mbt;
180 	struct mbuf *n;
181 
182 	spin_lock(&mbuf_track_spin);
183 	while (m) {
184 		n = m->m_nextpkt;
185 		while (m) {
186 			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
187 			if (mbt == NULL) {
188 				spin_unlock(&mbuf_track_spin);
189 				panic("mbuftrackid: mbuf %p not tracked", m);
190 			}
191 			mbt->trackid = trackid;
192 			m = m->m_next;
193 		}
194 		m = n;
195 	}
196 	spin_unlock(&mbuf_track_spin);
197 }
198 
199 static int
200 mbuftrack_callback(struct mbtrack *mbt, void *arg)
201 {
202 	struct sysctl_req *req = arg;
203 	char buf[64];
204 	int error;
205 
206 	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
207 
208 	spin_unlock(&mbuf_track_spin);
209 	error = SYSCTL_OUT(req, buf, strlen(buf));
210 	spin_lock(&mbuf_track_spin);
211 	if (error)
212 		return(-error);
213 	return(0);
214 }
215 
216 static int
217 mbuftrack_show(SYSCTL_HANDLER_ARGS)
218 {
219 	int error;
220 
221 	spin_lock(&mbuf_track_spin);
222 	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
223 				     mbuftrack_callback, req);
224 	spin_unlock(&mbuf_track_spin);
225 	return (-error);
226 }
227 SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
228 	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
229 
230 #else
231 
232 #define mbuftrack(m)
233 #define mbufuntrack(m)
234 
235 #endif
236 
237 static void mbinit(void *);
238 SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
239 
240 static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];
241 
242 static struct mbstat mbstat[SMP_MAXCPU];
243 int	max_linkhdr;
244 int	max_protohdr;
245 int	max_hdr;
246 int	max_datalen;
247 int	m_defragpackets;
248 int	m_defragbytes;
249 int	m_defraguseless;
250 int	m_defragfailure;
251 #ifdef MBUF_STRESS_TEST
252 int	m_defragrandomfailures;
253 #endif
254 
255 struct objcache *mbuf_cache, *mbufphdr_cache;
256 struct objcache *mclmeta_cache;
257 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
258 
259 int	nmbclusters;
260 int	nmbufs;
261 
262 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
263 	   &max_linkhdr, 0, "");
264 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
265 	   &max_protohdr, 0, "");
266 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
267 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
268 	   &max_datalen, 0, "");
int	mbuf_wait = 32;		/* mbuf sleep time in ticks (assumed default;
				 * no declaration appears in this listing) */
269 SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
270 	   &mbuf_wait, 0, "");
271 static int do_mbstat(SYSCTL_HANDLER_ARGS);
272 
273 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
274 	0, 0, do_mbstat, "S,mbstat", "");
275 
276 static int do_mbtypes(SYSCTL_HANDLER_ARGS);
277 
278 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
279 	0, 0, do_mbtypes, "LU", "");
280 
281 static int
282 do_mbstat(SYSCTL_HANDLER_ARGS)
283 {
284 	struct mbstat mbstat_total;
285 	struct mbstat *mbstat_totalp;
286 	int i;
287 
288 	bzero(&mbstat_total, sizeof(mbstat_total));
289 	mbstat_totalp = &mbstat_total;
290 
291 	for (i = 0; i < ncpus; i++)
292 	{
293 		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
294 		mbstat_total.m_clusters += mbstat[i].m_clusters;
295 		mbstat_total.m_spare += mbstat[i].m_spare;
296 		mbstat_total.m_clfree += mbstat[i].m_clfree;
297 		mbstat_total.m_drops += mbstat[i].m_drops;
298 		mbstat_total.m_wait += mbstat[i].m_wait;
299 		mbstat_total.m_drain += mbstat[i].m_drain;
300 		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
301 		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
302 
303 	}
304 	/*
305 	 * The following fields are not cumulative, so just
306 	 * get their values once.
307 	 */
308 	mbstat_total.m_msize = mbstat[0].m_msize;
309 	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
310 	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
311 	mbstat_total.m_mlen = mbstat[0].m_mlen;
312 	mbstat_total.m_mhlen = mbstat[0].m_mhlen;
313 
314 	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
315 }
316 
317 static int
318 do_mbtypes(SYSCTL_HANDLER_ARGS)
319 {
320 	u_long totals[MT_NTYPES];
321 	int i, j;
322 
323 	for (i = 0; i < MT_NTYPES; i++)
324 		totals[i] = 0;
325 
326 	for (i = 0; i < ncpus; i++)
327 	{
328 		for (j = 0; j < MT_NTYPES; j++)
329 			totals[j] += mbtypes[i][j];
330 	}
331 
332 	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
333 }
334 
335 /*
336  * These are read-only because we do not currently have any code
337  * to adjust the objcache limits after the fact.  The variables
338  * may only be set as boot-time tunables.
339  */
340 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
341 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
342 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
343 	   "Maximum number of mbufs available");
344 
345 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
346 	   &m_defragpackets, 0, "");
347 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
348 	   &m_defragbytes, 0, "");
349 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
350 	   &m_defraguseless, 0, "");
351 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
352 	   &m_defragfailure, 0, "");
353 #ifdef MBUF_STRESS_TEST
354 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
355 	   &m_defragrandomfailures, 0, "");
356 #endif
357 
358 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
359 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
360 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
361 
362 static void m_reclaim (void);
363 static void m_mclref(void *arg);
364 static void m_mclfree(void *arg);
365 
366 /*
367  * NOTE: Default NMBUFS must take into account a possible DOS attack
368  *	 using fd passing on unix domain sockets.
369  */
370 #ifndef NMBCLUSTERS
371 #define NMBCLUSTERS	(512 + maxusers * 16)
372 #endif
373 #ifndef NMBUFS
374 #define NMBUFS		(nmbclusters * 2 + maxfiles)
375 #endif
376 
377 /*
378  * Perform sanity checks of tunables declared above.
379  */
380 static void
381 tunable_mbinit(void *dummy)
382 {
383 	/*
384 	 * This has to be done before VM init.
385 	 */
386 	nmbclusters = NMBCLUSTERS;
387 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
388 	nmbufs = NMBUFS;
389 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
390 	/* Sanity checks */
391 	if (nmbufs < nmbclusters * 2)
392 		nmbufs = nmbclusters * 2;
393 }
394 SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
395 	tunable_mbinit, NULL);
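
/*
 * Illustrative note (not from the original source): because the limits
 * above are boot-time tunables, they can only be raised from the loader,
 * e.g. in /boot/loader.conf (values are examples only):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() still clamps nmbufs to at least nmbclusters * 2.
 */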
396 
397 /* "number of clusters of pages" */
398 #define NCL_INIT	1
399 
400 #define NMB_INIT	16
401 
402 /*
403  * The mbuf object cache only guarantees that m_next and m_nextpkt are
404  * NULL and that m_data points to the beginning of the data area.  In
405  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
406  * responsibility of the caller to initialize those fields before use.
407  */
408 
409 static boolean_t __inline
410 mbuf_ctor(void *obj, void *private, int ocflags)
411 {
412 	struct mbuf *m = obj;
413 
414 	m->m_next = NULL;
415 	m->m_nextpkt = NULL;
416 	m->m_data = m->m_dat;
417 	m->m_flags = 0;
418 
419 	return (TRUE);
420 }
421 
422 /*
423  * Initialize the mbuf and the packet header fields.
424  */
425 static boolean_t
426 mbufphdr_ctor(void *obj, void *private, int ocflags)
427 {
428 	struct mbuf *m = obj;
429 
430 	m->m_next = NULL;
431 	m->m_nextpkt = NULL;
432 	m->m_data = m->m_pktdat;
433 	m->m_flags = M_PKTHDR | M_PHCACHE;
434 
435 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
436 	SLIST_INIT(&m->m_pkthdr.tags);
437 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
438 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
439 
440 	return (TRUE);
441 }
442 
443 /*
444  * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
445  */
446 static boolean_t
447 mclmeta_ctor(void *obj, void *private, int ocflags)
448 {
449 	struct mbcluster *cl = obj;
450 	void *buf;
451 
452 	if (ocflags & M_NOWAIT)
453 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
454 	else
455 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
456 	if (buf == NULL)
457 		return (FALSE);
458 	cl->mcl_refs = 0;
459 	cl->mcl_data = buf;
460 	return (TRUE);
461 }
462 
463 static void
464 mclmeta_dtor(void *obj, void *private)
465 {
466 	struct mbcluster *mcl = obj;
467 
468 	KKASSERT(mcl->mcl_refs == 0);
469 	kfree(mcl->mcl_data, M_MBUFCL);
470 }
471 
472 static void
473 linkcluster(struct mbuf *m, struct mbcluster *cl)
474 {
475 	/*
476 	 * Add the cluster to the mbuf.  The caller will detect that the
477 	 * mbuf now has an attached cluster.
478 	 */
479 	m->m_ext.ext_arg = cl;
480 	m->m_ext.ext_buf = cl->mcl_data;
481 	m->m_ext.ext_ref = m_mclref;
482 	m->m_ext.ext_free = m_mclfree;
483 	m->m_ext.ext_size = MCLBYTES;
484 	atomic_add_int(&cl->mcl_refs, 1);
485 
486 	m->m_data = m->m_ext.ext_buf;
487 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
488 }
489 
490 static boolean_t
491 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
492 {
493 	struct mbuf *m = obj;
494 	struct mbcluster *cl;
495 
496 	mbufphdr_ctor(obj, private, ocflags);
497 	cl = objcache_get(mclmeta_cache, ocflags);
498 	if (cl == NULL) {
499 		++mbstat[mycpu->gd_cpuid].m_drops;
500 		return (FALSE);
501 	}
502 	m->m_flags |= M_CLCACHE;
503 	linkcluster(m, cl);
504 	return (TRUE);
505 }
506 
507 static boolean_t
508 mbufcluster_ctor(void *obj, void *private, int ocflags)
509 {
510 	struct mbuf *m = obj;
511 	struct mbcluster *cl;
512 
513 	mbuf_ctor(obj, private, ocflags);
514 	cl = objcache_get(mclmeta_cache, ocflags);
515 	if (cl == NULL) {
516 		++mbstat[mycpu->gd_cpuid].m_drops;
517 		return (FALSE);
518 	}
519 	m->m_flags |= M_CLCACHE;
520 	linkcluster(m, cl);
521 	return (TRUE);
522 }
523 
524 /*
525  * Used for both the cluster and cluster PHDR caches.
526  *
527  * The mbuf may have lost its cluster due to sharing; deal
528  * with the situation by checking M_EXT.
529  */
530 static void
531 mbufcluster_dtor(void *obj, void *private)
532 {
533 	struct mbuf *m = obj;
534 	struct mbcluster *mcl;
535 
536 	if (m->m_flags & M_EXT) {
537 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
538 		mcl = m->m_ext.ext_arg;
539 		KKASSERT(mcl->mcl_refs == 1);
540 		mcl->mcl_refs = 0;
541 		objcache_put(mclmeta_cache, mcl);
542 	}
543 }
544 
545 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
546 struct objcache_malloc_args mclmeta_malloc_args =
547 	{ sizeof(struct mbcluster), M_MCLMETA };
548 
549 /* ARGSUSED*/
550 static void
551 mbinit(void *dummy)
552 {
553 	int mb_limit, cl_limit;
554 	int limit;
555 	int i;
556 
557 	/*
558 	 * Initialize statistics
559 	 */
560 	for (i = 0; i < ncpus; i++) {
561 		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
562 		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
563 		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
564 		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
565 		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
566 	}
567 
568 	/*
569 	 * Create object caches and save cluster limits, which will
570 	 * be used to adjust backing kmalloc pools' limit later.
571 	 */
572 
573 	mb_limit = cl_limit = 0;
574 
575 	limit = nmbufs;
576 	mbuf_cache = objcache_create("mbuf", &limit, 0,
577 	    mbuf_ctor, NULL, NULL,
578 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
579 	mb_limit += limit;
580 
581 	limit = nmbufs;
582 	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
583 	    mbufphdr_ctor, NULL, NULL,
584 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
585 	mb_limit += limit;
586 
587 	cl_limit = nmbclusters;
588 	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
589 	    mclmeta_ctor, mclmeta_dtor, NULL,
590 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
591 
592 	limit = nmbclusters;
593 	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
594 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
595 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
596 	mb_limit += limit;
597 
598 	limit = nmbclusters;
599 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
600 	    &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
601 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
602 	mb_limit += limit;
603 
604 	/*
605 	 * Adjust backing kmalloc pools' limit
606 	 *
607 	 * NOTE: We raise the limit by another 1/8 to take the effect
608 	 * of loosememuse into account.
609 	 */
610 	cl_limit += cl_limit / 8;
611 	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
612 			    mclmeta_malloc_args.objsize * cl_limit);
613 	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
614 
615 	mb_limit += mb_limit / 8;
616 	kmalloc_raise_limit(mbuf_malloc_args.mtype,
617 			    mbuf_malloc_args.objsize * mb_limit);
618 }
619 
620 /*
621  * Return the number of references to this mbuf's data.  0 is returned
622  * if the mbuf is not M_EXT, a reference count is returned if it is
623  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
624  */
625 int
626 m_sharecount(struct mbuf *m)
627 {
628 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
629 	case 0:
630 		return (0);
631 	case M_EXT:
632 		return (99);
633 	case M_EXT | M_EXT_CLUSTER:
634 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
635 	}
636 	/* NOTREACHED */
637 	return (0);		/* to shut up compiler */
638 }
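
/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * code that wants to scribble on packet data must make sure the cluster
 * is not visible to anyone else, e.g.:
 *
 *	if ((m->m_flags & M_EXT) && m_sharecount(m) > 1) {
 *		struct mbuf *w = m_dup(m, MB_DONTWAIT); // deep, writable copy
 *		if (w == NULL)
 *			return (ENOBUFS);
 *		m_freem(m);
 *		m = w;
 *	}
 */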
639 
640 /*
641  * change mbuf to new type
642  */
643 void
644 m_chtype(struct mbuf *m, int type)
645 {
646 	struct globaldata *gd = mycpu;
647 
648 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
649 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
650 	atomic_set_short_nonlocked(&m->m_type, type);
651 }
652 
653 static void
654 m_reclaim(void)
655 {
656 	struct domain *dp;
657 	struct protosw *pr;
658 
659 	kprintf("Debug: m_reclaim() called\n");
660 
661 	SLIST_FOREACH(dp, &domains, dom_next) {
662 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
663 			if (pr->pr_drain)
664 				(*pr->pr_drain)();
665 		}
666 	}
667 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
668 }
669 
670 static void __inline
671 updatestats(struct mbuf *m, int type)
672 {
673 	struct globaldata *gd = mycpu;
674 
675 	m->m_type = type;
676 	mbuftrack(m);
677 #ifdef MBUF_DEBUG
678 	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
679 	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
680 #endif
681 
682 	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
683 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
684 
685 }
686 
687 /*
688  * Allocate an mbuf.
689  */
690 struct mbuf *
691 m_get(int how, int type)
692 {
693 	struct mbuf *m;
694 	int ntries = 0;
695 	int ocf = MBTOM(how);
696 
697 retryonce:
698 
699 	m = objcache_get(mbuf_cache, ocf);
700 
701 	if (m == NULL) {
702 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
703 			struct objcache *reclaimlist[] = {
704 				mbufphdr_cache,
705 				mbufcluster_cache,
706 				mbufphdrcluster_cache
707 			};
708 			const int nreclaims = __arysize(reclaimlist);
709 
710 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
711 				m_reclaim();
712 			goto retryonce;
713 		}
714 		++mbstat[mycpu->gd_cpuid].m_drops;
715 		return (NULL);
716 	}
717 #ifdef MBUF_DEBUG
718 	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
719 #endif
720 	m->m_len = 0;
721 
722 	updatestats(m, type);
723 	return (m);
724 }
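
/*
 * Usage sketch (illustrative; struct foohdr is a hypothetical stand-in
 * for the caller's header type):
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);	// NULL is possible, always check
 *	m->m_len = sizeof(struct foohdr);
 *
 * m_get() zeroes m_len before returning; the caller sets it to the
 * amount of data actually placed in the mbuf.
 */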
725 
726 struct mbuf *
727 m_gethdr(int how, int type)
728 {
729 	struct mbuf *m;
730 	int ocf = MBTOM(how);
731 	int ntries = 0;
732 
733 retryonce:
734 
735 	m = objcache_get(mbufphdr_cache, ocf);
736 
737 	if (m == NULL) {
738 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
739 			struct objcache *reclaimlist[] = {
740 				mbuf_cache,
741 				mbufcluster_cache, mbufphdrcluster_cache
742 			};
743 			const int nreclaims = __arysize(reclaimlist);
744 
745 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
746 				m_reclaim();
747 			goto retryonce;
748 		}
749 		++mbstat[mycpu->gd_cpuid].m_drops;
750 		return (NULL);
751 	}
752 #ifdef MBUF_DEBUG
753 	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
754 #endif
755 	m->m_len = 0;
756 	m->m_pkthdr.len = 0;
757 
758 	updatestats(m, type);
759 	return (m);
760 }
761 
762 /*
763  * Get an mbuf (not an mbuf cluster!) and zero it.
764  * Deprecated.
765  */
766 struct mbuf *
767 m_getclr(int how, int type)
768 {
769 	struct mbuf *m;
770 
771 	m = m_get(how, type);
772 	if (m != NULL)
773 		bzero(m->m_data, MLEN);
774 	return (m);
775 }
776 
777 /*
778  * Returns an mbuf with an attached cluster.
779  * Because many network drivers use this kind of buffer a lot, it is
780  * convenient to keep a small pool of free buffers of this kind.
781  * Even a small size such as 10 gives about 10% improvement in the
782  * forwarding rate in a bridge or router.
783  */
784 struct mbuf *
785 m_getcl(int how, short type, int flags)
786 {
787 	struct mbuf *m;
788 	int ocflags = MBTOM(how);
789 	int ntries = 0;
790 
791 retryonce:
792 
793 	if (flags & M_PKTHDR)
794 		m = objcache_get(mbufphdrcluster_cache, ocflags);
795 	else
796 		m = objcache_get(mbufcluster_cache, ocflags);
797 
798 	if (m == NULL) {
799 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
800 			struct objcache *reclaimlist[1];
801 
802 			if (flags & M_PKTHDR)
803 				reclaimlist[0] = mbufcluster_cache;
804 			else
805 				reclaimlist[0] = mbufphdrcluster_cache;
806 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
807 				m_reclaim();
808 			goto retryonce;
809 		}
810 		++mbstat[mycpu->gd_cpuid].m_drops;
811 		return (NULL);
812 	}
813 
814 #ifdef MBUF_DEBUG
815 	KASSERT(m->m_data == m->m_ext.ext_buf,
816 		("mbuf %p: bad m_data in get", m));
817 #endif
818 	m->m_type = type;
819 	m->m_len = 0;
820 	m->m_pkthdr.len = 0;	/* just do it unconditionally */
821 
822 	mbuftrack(m);
823 
824 	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
825 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
826 	return (m);
827 }
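
/*
 * Typical receive-path sketch (illustrative; ifp and the DMA setup are
 * the caller's):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL) {
 *		ifp->if_ierrors++;
 *		return;
 *	}
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;	// hand the full cluster to RX
 *
 * One objcache hit here is cheaper than m_gethdr() + m_mclget().
 */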
828 
829 /*
830  * Allocate chain of requested length.
831  */
832 struct mbuf *
833 m_getc(int len, int how, int type)
834 {
835 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
836 	int nsize;
837 
838 	while (len > 0) {
839 		n = m_getl(len, how, type, 0, &nsize);
840 		if (n == NULL)
841 			goto failed;
842 		n->m_len = 0;
843 		*ntail = n;
844 		ntail = &n->m_next;
845 		len -= nsize;
846 	}
847 	return (nfirst);
848 
849 failed:
850 	m_freem(nfirst);
851 	return (NULL);
852 }
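
/*
 * Sketch of filling the returned chain (illustrative; total/remaining
 * are the caller's counters):
 *
 *	struct mbuf *top, *n;
 *
 *	top = m_getc(total, MB_WAIT, MT_DATA);
 *	for (n = top; n != NULL; n = n->m_next) {
 *		int space = (n->m_flags & M_EXT) ? n->m_ext.ext_size : MLEN;
 *
 *		n->m_len = imin(space, remaining);	// m_len starts at 0
 *		... copy n->m_len bytes to mtod(n, caddr_t) ...
 *		remaining -= n->m_len;
 *	}
 *
 * m_getl() picks cluster mbufs while the remaining length justifies it,
 * so the chain may mix plain and cluster mbufs.
 */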
853 
854 /*
855  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
856  * and return a pointer to the head of the allocated chain. If m0 is
857  * non-null, then we assume that it is a single mbuf or an mbuf chain to
858  * which we want len bytes worth of mbufs and/or clusters attached, and so
859  * if we succeed in allocating it, we will just return a pointer to m0.
860  *
861  * If we happen to fail at any point during the allocation, we will free
862  * up everything we have already allocated and return NULL.
863  *
864  * Deprecated.  Use m_getc() and m_cat() instead.
865  */
866 struct mbuf *
867 m_getm(struct mbuf *m0, int len, int type, int how)
868 {
869 	struct mbuf *nfirst;
870 
871 	nfirst = m_getc(len, how, type);
872 
873 	if (m0 != NULL) {
874 		m_last(m0)->m_next = nfirst;
875 		return (m0);
876 	}
877 
878 	return (nfirst);
879 }
880 
881 /*
882  * Adds a cluster to a normal mbuf; M_EXT is set on success.
883  * Deprecated.  Use m_getcl() instead.
884  */
885 void
886 m_mclget(struct mbuf *m, int how)
887 {
888 	struct mbcluster *mcl;
889 
890 	KKASSERT((m->m_flags & M_EXT) == 0);
891 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
892 	if (mcl != NULL) {
893 		linkcluster(m, mcl);
894 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
895 					  1);
896 	} else {
897 		++mbstat[mycpu->gd_cpuid].m_drops;
898 	}
899 }
900 
901 /*
902  * Updates to mbcluster must be MPSAFE.  Only an entity which already has
903  * a reference to the cluster can ref it, so we are in no danger of
904  * racing an add with a subtract.  But the operation must still be atomic
905  * since multiple entities may have a reference on the cluster.
906  *
907  * m_mclfree() is almost the same but it must contend with two entities
908  * freeing the cluster at the same time.
909  */
910 static void
911 m_mclref(void *arg)
912 {
913 	struct mbcluster *mcl = arg;
914 
915 	atomic_add_int(&mcl->mcl_refs, 1);
916 }
917 
918 /*
919  * When dereferencing a cluster we have to deal with an N->0 race, where
920  * N entities free their references simultaneously.  To do this we use
921  * atomic_fetchadd_int().
922  */
923 static void
924 m_mclfree(void *arg)
925 {
926 	struct mbcluster *mcl = arg;
927 
928 	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
929 		objcache_put(mclmeta_cache, mcl);
930 }
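
/*
 * Why atomic_fetchadd_int(): a plain atomic subtract would let two CPUs
 * dropping the last two references race to see the final value, with
 * either both or neither attempting the free.  fetchadd returns the
 * previous value, so exactly one caller observes the 1->0 transition.
 * Minimal sketch of the same pattern (final_teardown is hypothetical):
 *
 *	if (atomic_fetchadd_int(&refs, -1) == 1)
 *		final_teardown();	// sole winner of the N->0 race
 */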
931 
932 /*
933  * Free a single mbuf and any associated external storage.  The successor,
934  * if any, is returned.
935  *
936  * We do need to check the non-first mbufs for m_aux, since some existing
937  * code does not call M_PREPEND properly.
938  * (example: call to bpf_mtap from drivers)
939  */
940 
941 #ifdef MBUF_DEBUG
942 
943 struct mbuf  *
944 _m_free(struct mbuf *m, const char *func)
945 
946 #else
947 
948 struct mbuf *
949 m_free(struct mbuf *m)
950 
951 #endif
952 {
953 	struct mbuf *n;
954 	struct globaldata *gd = mycpu;
955 
956 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
957 	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
958 	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
959 
960 	n = m->m_next;
961 
962 	/*
963 	 * Make sure the mbuf is in constructed state before returning it
964 	 * to the objcache.
965 	 */
966 	m->m_next = NULL;
967 	mbufuntrack(m);
968 #ifdef MBUF_DEBUG
969 	m->m_hdr.mh_lastfunc = func;
970 #endif
971 #ifdef notyet
972 	KKASSERT(m->m_nextpkt == NULL);
973 #else
974 	if (m->m_nextpkt != NULL) {
975 		static int afewtimes = 10;
976 
977 		if (afewtimes-- > 0) {
978 			kprintf("mfree: m->m_nextpkt != NULL\n");
979 			print_backtrace(-1);
980 		}
981 		m->m_nextpkt = NULL;
982 	}
983 #endif
984 	if (m->m_flags & M_PKTHDR) {
985 		m_tag_delete_chain(m);		/* eliminate XXX JH */
986 	}
987 
988 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
989 
990 	/*
991 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
992 	 * cache.  This is based on the PHCACHE flag which tells us whether
993 	 * the mbuf was originally allocated out of a packet-header cache
994 	 * or a non-packet-header cache.
995 	 */
996 	if (m->m_flags & M_PHCACHE) {
997 		m->m_flags |= M_PKTHDR;
998 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
999 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
1000 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
1001 		SLIST_INIT(&m->m_pkthdr.tags);
1002 	}
1003 
1004 	/*
1005 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
1006 	 * the mbuf was originally allocated from a cluster cache or not,
1007 	 * and is totally separate from whether the mbuf is currently
1008 	 * associated with a cluster.
1009 	 */
1010 	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
1011 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
1012 		/*
1013 		 * mbuf+cluster cache case.  The mbuf was allocated from the
1014 		 * combined mbuf_cluster cache and can be returned to the
1015 		 * cache if the cluster hasn't been shared.
1016 		 */
1017 		if (m_sharecount(m) == 1) {
1018 			/*
1019 			 * The cluster has not been shared, we can just
1020 			 * reset the data pointer and return the mbuf
1021 			 * to the cluster cache.  Note that the reference
1022 			 * count is left intact (it is still associated with
1023 			 * an mbuf).
1024 			 */
1025 			m->m_data = m->m_ext.ext_buf;
1026 			if (m->m_flags & M_PHCACHE)
1027 				objcache_put(mbufphdrcluster_cache, m);
1028 			else
1029 				objcache_put(mbufcluster_cache, m);
1030 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1031 		} else {
1032 			/*
1033 			 * Hell.  Someone else has a ref on this cluster,
1034 			 * we have to disconnect it which means we can't
1035 			 * put it back into the mbufcluster_cache, we
1036 			 * have to destroy the mbuf.
1037 			 *
1038 			 * Other mbuf references to the cluster will typically
1039 			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1040 			 *
1041 			 * XXX we could try to connect another cluster to
1042 			 * it.
1043 			 */
1044 			m->m_ext.ext_free(m->m_ext.ext_arg);
1045 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1046 			if (m->m_flags & M_PHCACHE)
1047 				objcache_dtor(mbufphdrcluster_cache, m);
1048 			else
1049 				objcache_dtor(mbufcluster_cache, m);
1050 		}
1051 		break;
1052 	case M_EXT | M_EXT_CLUSTER:
1053 		/*
1054 		 * Normal cluster associated with an mbuf that was allocated
1055 		 * from the normal mbuf pool rather than the cluster pool.
1056 		 * The cluster has to be independently disassociated from the
1057 		 * mbuf.
1058 		 */
1059 		if (m_sharecount(m) == 1)
1060 			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1061 		/* fall through */
1062 	case M_EXT:
1063 		/*
1064 		 * Normal cluster association case, disconnect the cluster from
1065 		 * the mbuf.  The cluster may or may not be custom.
1066 		 */
1067 		m->m_ext.ext_free(m->m_ext.ext_arg);
1068 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1069 		/* fall through */
1070 	case 0:
1071 		/*
1072 		 * return the mbuf to the mbuf cache.
1073 		 */
1074 		if (m->m_flags & M_PHCACHE) {
1075 			m->m_data = m->m_pktdat;
1076 			objcache_put(mbufphdr_cache, m);
1077 		} else {
1078 			m->m_data = m->m_dat;
1079 			objcache_put(mbuf_cache, m);
1080 		}
1081 		atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
1082 		break;
1083 	default:
1084 		if (!panicstr)
1085 			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1086 		break;
1087 	}
1088 	return (n);
1089 }
1090 
1091 #ifdef MBUF_DEBUG
1092 
1093 void
1094 _m_freem(struct mbuf *m, const char *func)
1095 {
1096 	while (m)
1097 		m = _m_free(m, func);
1098 }
1099 
1100 #else
1101 
1102 void
1103 m_freem(struct mbuf *m)
1104 {
1105 	while (m)
1106 		m = m_free(m);
1107 }
1108 
1109 #endif
1110 
1111 /*
1112  * mbuf utility routines
1113  */
1114 
1115 /*
1116  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1117  * copy junk along.
1118  */
1119 struct mbuf *
1120 m_prepend(struct mbuf *m, int len, int how)
1121 {
1122 	struct mbuf *mn;
1123 
1124 	if (m->m_flags & M_PKTHDR)
1125 		mn = m_gethdr(how, m->m_type);
1126 	else
1127 		mn = m_get(how, m->m_type);
1128 	if (mn == NULL) {
1129 		m_freem(m);
1130 		return (NULL);
1131 	}
1132 	if (m->m_flags & M_PKTHDR)
1133 		M_MOVE_PKTHDR(mn, m);
1134 	mn->m_next = m;
1135 	m = mn;
1136 	if (len < MHLEN)
1137 		MH_ALIGN(m, len);
1138 	m->m_len = len;
1139 	return (m);
1140 }
1141 
1142 /*
1143  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1144  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
1145  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
1146  * Note that the copy is read-only, because clusters are not copied,
1147  * only their reference counts are incremented.
1148  */
1149 struct mbuf *
1150 m_copym(const struct mbuf *m, int off0, int len, int wait)
1151 {
1152 	struct mbuf *n, **np;
1153 	int off = off0;
1154 	struct mbuf *top;
1155 	int copyhdr = 0;
1156 
1157 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
1158 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
1159 	if (off == 0 && (m->m_flags & M_PKTHDR))
1160 		copyhdr = 1;
1161 	while (off > 0) {
1162 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1163 		if (off < m->m_len)
1164 			break;
1165 		off -= m->m_len;
1166 		m = m->m_next;
1167 	}
1168 	np = &top;
1169 	top = NULL;
1170 	while (len > 0) {
1171 		if (m == NULL) {
1172 			KASSERT(len == M_COPYALL,
1173 			    ("m_copym, length > size of mbuf chain"));
1174 			break;
1175 		}
1176 		/*
1177 		 * Because we are sharing any cluster attachment below,
1178 		 * be sure to get an mbuf that does not have a cluster
1179 		 * associated with it.
1180 		 */
1181 		if (copyhdr)
1182 			n = m_gethdr(wait, m->m_type);
1183 		else
1184 			n = m_get(wait, m->m_type);
1185 		*np = n;
1186 		if (n == NULL)
1187 			goto nospace;
1188 		if (copyhdr) {
1189 			if (!m_dup_pkthdr(n, m, wait))
1190 				goto nospace;
1191 			if (len == M_COPYALL)
1192 				n->m_pkthdr.len -= off0;
1193 			else
1194 				n->m_pkthdr.len = len;
1195 			copyhdr = 0;
1196 		}
1197 		n->m_len = min(len, m->m_len - off);
1198 		if (m->m_flags & M_EXT) {
1199 			KKASSERT((n->m_flags & M_EXT) == 0);
1200 			n->m_data = m->m_data + off;
1201 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1202 			n->m_ext = m->m_ext;
1203 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1204 		} else {
1205 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1206 			    (unsigned)n->m_len);
1207 		}
1208 		if (len != M_COPYALL)
1209 			len -= n->m_len;
1210 		off = 0;
1211 		m = m->m_next;
1212 		np = &n->m_next;
1213 	}
1214 	if (top == NULL)
1215 		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1216 	return (top);
1217 nospace:
1218 	m_freem(top);
1219 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1220 	return (NULL);
1221 }
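
/*
 * Typical use (illustrative; sendq/off/len stand in for the caller's
 * state): a transport keeps its send queue intact and transmits a
 * shallow, read-only copy of the bytes being (re)sent:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(sendq, off, len, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	// cluster data is shared with sendq; treat n as read-only
 */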
1222 
1223 /*
1224  * Copy an entire packet, including header (which must be present).
1225  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1226  * Note that the copy is read-only, because clusters are not copied,
1227  * only their reference counts are incremented.
1228  * Preserve alignment of the first mbuf so if the creator has left
1229  * some room at the beginning (e.g. for inserting protocol headers)
1230  * the copies also have the room available.
1231  */
1232 struct mbuf *
1233 m_copypacket(struct mbuf *m, int how)
1234 {
1235 	struct mbuf *top, *n, *o;
1236 
1237 	n = m_gethdr(how, m->m_type);
1238 	top = n;
1239 	if (!n)
1240 		goto nospace;
1241 
1242 	if (!m_dup_pkthdr(n, m, how))
1243 		goto nospace;
1244 	n->m_len = m->m_len;
1245 	if (m->m_flags & M_EXT) {
1246 		KKASSERT((n->m_flags & M_EXT) == 0);
1247 		n->m_data = m->m_data;
1248 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1249 		n->m_ext = m->m_ext;
1250 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1251 	} else {
1252 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
1253 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1254 	}
1255 
1256 	m = m->m_next;
1257 	while (m) {
1258 		o = m_get(how, m->m_type);
1259 		if (!o)
1260 			goto nospace;
1261 
1262 		n->m_next = o;
1263 		n = n->m_next;
1264 
1265 		n->m_len = m->m_len;
1266 		if (m->m_flags & M_EXT) {
1267 			KKASSERT((n->m_flags & M_EXT) == 0);
1268 			n->m_data = m->m_data;
1269 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1270 			n->m_ext = m->m_ext;
1271 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1272 		} else {
1273 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1274 		}
1275 
1276 		m = m->m_next;
1277 	}
1278 	return top;
1279 nospace:
1280 	m_freem(top);
1281 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1282 	return (NULL);
1283 }
1284 
1285 /*
1286  * Copy data from an mbuf chain starting "off" bytes from the beginning,
1287  * continuing for "len" bytes, into the indicated buffer.
1288  */
1289 void
1290 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1291 {
1292 	unsigned count;
1293 
1294 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1295 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1296 	while (off > 0) {
1297 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1298 		if (off < m->m_len)
1299 			break;
1300 		off -= m->m_len;
1301 		m = m->m_next;
1302 	}
1303 	while (len > 0) {
1304 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1305 		count = min(m->m_len - off, len);
1306 		bcopy(mtod(m, caddr_t) + off, cp, count);
1307 		len -= count;
1308 		cp += count;
1309 		off = 0;
1310 		m = m->m_next;
1311 	}
1312 }
1313 
1314 /*
1315  * Copy a packet header mbuf chain into a completely new chain, including
1316  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1317  * you need a writable copy of an mbuf chain.
1318  */
1319 struct mbuf *
1320 m_dup(struct mbuf *m, int how)
1321 {
1322 	struct mbuf **p, *top = NULL;
1323 	int remain, moff, nsize;
1324 
1325 	/* Sanity check */
1326 	if (m == NULL)
1327 		return (NULL);
1328 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1329 
1330 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1331 	remain = m->m_pkthdr.len;
1332 	moff = 0;
1333 	p = &top;
1334 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1335 		struct mbuf *n;
1336 
1337 		/* Get the next new mbuf */
1338 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1339 			   &nsize);
1340 		if (n == NULL)
1341 			goto nospace;
1342 		if (top == NULL)
1343 			if (!m_dup_pkthdr(n, m, how))
1344 				goto nospace0;
1345 
1346 		/* Link it into the new chain */
1347 		*p = n;
1348 		p = &n->m_next;
1349 
1350 		/* Copy data from original mbuf(s) into new mbuf */
1351 		n->m_len = 0;
1352 		while (n->m_len < nsize && m != NULL) {
1353 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1354 
1355 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1356 			moff += chunk;
1357 			n->m_len += chunk;
1358 			remain -= chunk;
1359 			if (moff == m->m_len) {
1360 				m = m->m_next;
1361 				moff = 0;
1362 			}
1363 		}
1364 
1365 		/* Check correct total mbuf length */
1366 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1367 			("%s: bogus m_pkthdr.len", __func__));
1368 	}
1369 	return (top);
1370 
1371 nospace:
1372 	m_freem(top);
1373 nospace0:
1374 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1375 	return (NULL);
1376 }
1377 
1378 /*
1379  * Copy the non-packet mbuf data chain into a new set of mbufs, including
1380  * copying any mbuf clusters.  This is typically used to realign a data
1381  * chain by nfs_realign().
1382  *
1383  * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
1384  * and NULL can be returned if MB_DONTWAIT is passed.
1385  *
1386  * Be careful to use cluster mbufs; a large mbuf chain converted to
1387  * non-cluster mbufs can exhaust our supply of mbufs.
1388  */
1389 struct mbuf *
1390 m_dup_data(struct mbuf *m, int how)
1391 {
1392 	struct mbuf **p, *n, *top = NULL;
1393 	int mlen, moff, chunk, gsize, nsize;
1394 
1395 	/*
1396 	 * Degenerate case
1397 	 */
1398 	if (m == NULL)
1399 		return (NULL);
1400 
1401 	/*
1402 	 * Optimize the mbuf allocation but do not get too carried away.
1403 	 */
1404 	if (m->m_next || m->m_len > MLEN)
1405 		gsize = MCLBYTES;
1406 	else
1407 		gsize = MLEN;
1408 
1409 	/* Chain control */
1410 	p = &top;
1411 	n = NULL;
1412 	nsize = 0;
1413 
1414 	/*
1415 	 * Scan the mbuf chain until nothing is left, the new mbuf chain
1416 	 * will be allocated on the fly as needed.
1417 	 */
1418 	while (m) {
1419 		mlen = m->m_len;
1420 		moff = 0;
1421 
1422 		while (mlen) {
1423 			KKASSERT(m->m_type == MT_DATA);
1424 			if (n == NULL) {
1425 				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
1426 				if (n == NULL)
1427 					goto nospace;
1428 				n->m_len = 0;
1429 				*p = n;
1430 				p = &n->m_next;
1431 			}
1432 			chunk = imin(mlen, nsize);
1433 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1434 			mlen -= chunk;
1435 			moff += chunk;
1436 			n->m_len += chunk;
1437 			nsize -= chunk;
1438 			if (nsize == 0)
1439 				n = NULL;
1440 		}
1441 		m = m->m_next;
1442 	}
1443 	*p = NULL;
1444 	return(top);
1445 nospace:
1446 	*p = NULL;
1447 	m_freem(top);
1448 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1449 	return (NULL);
1450 }
1451 
1452 /*
1453  * Concatenate mbuf chain n to m.
1454  * Both chains must be of the same type (e.g. MT_DATA).
1455  * Any m_pkthdr is not updated.
1456  */
1457 void
1458 m_cat(struct mbuf *m, struct mbuf *n)
1459 {
1460 	m = m_last(m);
1461 	while (n) {
1462 		if (m->m_flags & M_EXT ||
1463 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1464 			/* just join the two chains */
1465 			m->m_next = n;
1466 			return;
1467 		}
1468 		/* splat the data from one into the other */
1469 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1470 		    (u_int)n->m_len);
1471 		m->m_len += n->m_len;
1472 		n = m_free(n);
1473 	}
1474 }
1475 
1476 void
1477 m_adj(struct mbuf *mp, int req_len)
1478 {
1479 	int len = req_len;
1480 	struct mbuf *m;
1481 	int count;
1482 
1483 	if ((m = mp) == NULL)
1484 		return;
1485 	if (len >= 0) {
1486 		/*
1487 		 * Trim from head.
1488 		 */
1489 		while (m != NULL && len > 0) {
1490 			if (m->m_len <= len) {
1491 				len -= m->m_len;
1492 				m->m_len = 0;
1493 				m = m->m_next;
1494 			} else {
1495 				m->m_len -= len;
1496 				m->m_data += len;
1497 				len = 0;
1498 			}
1499 		}
1500 		m = mp;
1501 		if (mp->m_flags & M_PKTHDR)
1502 			m->m_pkthdr.len -= (req_len - len);
1503 	} else {
1504 		/*
1505 		 * Trim from tail.  Scan the mbuf chain,
1506 		 * calculating its length and finding the last mbuf.
1507 		 * If the adjustment only affects this mbuf, then just
1508 		 * adjust and return.  Otherwise, rescan and truncate
1509 		 * after the remaining size.
1510 		 */
1511 		len = -len;
1512 		count = 0;
1513 		for (;;) {
1514 			count += m->m_len;
1515 			if (m->m_next == NULL)
1516 				break;
1517 			m = m->m_next;
1518 		}
1519 		if (m->m_len >= len) {
1520 			m->m_len -= len;
1521 			if (mp->m_flags & M_PKTHDR)
1522 				mp->m_pkthdr.len -= len;
1523 			return;
1524 		}
1525 		count -= len;
1526 		if (count < 0)
1527 			count = 0;
1528 		/*
1529 		 * Correct length for chain is "count".
1530 		 * Find the mbuf with last data, adjust its length,
1531 		 * and toss data from remaining mbufs on chain.
1532 		 */
1533 		m = mp;
1534 		if (m->m_flags & M_PKTHDR)
1535 			m->m_pkthdr.len = count;
1536 		for (; m; m = m->m_next) {
1537 			if (m->m_len >= count) {
1538 				m->m_len = count;
1539 				break;
1540 			}
1541 			count -= m->m_len;
1542 		}
1543 		while (m->m_next)
1544 			(m = m->m_next)->m_len = 0;
1545 	}
1546 }
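
/*
 * Example (illustrative): trimming a received ethernet frame, where a
 * positive length trims from the head and a negative one from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);	// strip the link header
 *	m_adj(m, -ETHER_CRC_LEN);	// strip the trailing FCS
 *
 * Both calls keep m_pkthdr.len consistent when M_PKTHDR is set.
 */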
1547 
1548 /*
1549  * Set the m_data pointer of a newly-allocated mbuf
1550  * to place an object of the specified size at the
1551  * end of the mbuf, longword aligned.
1552  */
1553 void
1554 m_align(struct mbuf *m, int len)
1555 {
1556 	int adjust;
1557 
1558 	if (m->m_flags & M_EXT)
1559 		adjust = m->m_ext.ext_size - len;
1560 	else if (m->m_flags & M_PKTHDR)
1561 		adjust = MHLEN - len;
1562 	else
1563 		adjust = MLEN - len;
1564 	m->m_data += adjust &~ (sizeof(long)-1);
1565 }
1566 
1567 /*
1568  * Rearrange an mbuf chain so that len bytes are contiguous
1569  * and in the data area of an mbuf (so that mtod will work for a structure
1570  * of size len).  Returns the resulting mbuf chain on success, frees it and
1571  * returns null on failure.  If there is room, it will add up to
1572  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1573  * avoid being called next time.
1574  */
1575 struct mbuf *
1576 m_pullup(struct mbuf *n, int len)
1577 {
1578 	struct mbuf *m;
1579 	int count;
1580 	int space;
1581 
1582 	/*
1583 	 * If first mbuf has no cluster, and has room for len bytes
1584 	 * without shifting current data, pullup into it;
1585 	 * otherwise allocate a new mbuf to prepend to the chain.
1586 	 */
1587 	if (!(n->m_flags & M_EXT) &&
1588 	    n->m_data + len < &n->m_dat[MLEN] &&
1589 	    n->m_next) {
1590 		if (n->m_len >= len)
1591 			return (n);
1592 		m = n;
1593 		n = n->m_next;
1594 		len -= m->m_len;
1595 	} else {
1596 		if (len > MHLEN)
1597 			goto bad;
1598 		if (n->m_flags & M_PKTHDR)
1599 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1600 		else
1601 			m = m_get(MB_DONTWAIT, n->m_type);
1602 		if (m == NULL)
1603 			goto bad;
1604 		m->m_len = 0;
1605 		if (n->m_flags & M_PKTHDR)
1606 			M_MOVE_PKTHDR(m, n);
1607 	}
1608 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1609 	do {
1610 		count = min(min(max(len, max_protohdr), space), n->m_len);
1611 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1612 		  (unsigned)count);
1613 		len -= count;
1614 		m->m_len += count;
1615 		n->m_len -= count;
1616 		space -= count;
1617 		if (n->m_len)
1618 			n->m_data += count;
1619 		else
1620 			n = m_free(n);
1621 	} while (len > 0 && n);
1622 	if (len > 0) {
1623 		m_free(m);
1624 		goto bad;
1625 	}
1626 	m->m_next = n;
1627 	return (m);
1628 bad:
1629 	m_freem(n);
1630 	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1631 	return (NULL);
1632 }
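
/*
 * Canonical use (illustrative): make a protocol header contiguous
 * before casting it with mtod():
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			// chain was freed on failure
 *	ip = mtod(m, struct ip *);
 *
 * Note the reassignment: on failure the original chain is gone and the
 * old pointer must not be touched again.
 */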
1633 
1634 /*
1635  * Partition an mbuf chain in two pieces, returning the tail --
1636  * all but the first len0 bytes.  In case of failure, it returns NULL and
1637  * attempts to restore the chain to its original state.
1638  *
1639  * Note that the resulting mbufs might be read-only, because the new
1640  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1641  * the "breaking point" happens to lie within a cluster mbuf. Use the
1642  * M_WRITABLE() macro to check for this case.
1643  */
1644 struct mbuf *
1645 m_split(struct mbuf *m0, int len0, int wait)
1646 {
1647 	struct mbuf *m, *n;
1648 	unsigned len = len0, remain;
1649 
1650 	for (m = m0; m && len > m->m_len; m = m->m_next)
1651 		len -= m->m_len;
1652 	if (m == NULL)
1653 		return (NULL);
1654 	remain = m->m_len - len;
1655 	if (m0->m_flags & M_PKTHDR) {
1656 		n = m_gethdr(wait, m0->m_type);
1657 		if (n == NULL)
1658 			return (NULL);
1659 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1660 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1661 		m0->m_pkthdr.len = len0;
1662 		if (m->m_flags & M_EXT)
1663 			goto extpacket;
1664 		if (remain > MHLEN) {
1665 			/* m can't be the lead packet */
1666 			MH_ALIGN(n, 0);
1667 			n->m_next = m_split(m, len, wait);
1668 			if (n->m_next == NULL) {
1669 				m_free(n);
1670 				return (NULL);
1671 			} else {
1672 				n->m_len = 0;
1673 				return (n);
1674 			}
1675 		} else
1676 			MH_ALIGN(n, remain);
1677 	} else if (remain == 0) {
1678 		n = m->m_next;
1679 		m->m_next = 0;
1680 		return (n);
1681 	} else {
1682 		n = m_get(wait, m->m_type);
1683 		if (n == NULL)
1684 			return (NULL);
1685 		M_ALIGN(n, remain);
1686 	}
1687 extpacket:
1688 	if (m->m_flags & M_EXT) {
1689 		KKASSERT((n->m_flags & M_EXT) == 0);
1690 		n->m_data = m->m_data + len;
1691 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1692 		n->m_ext = m->m_ext;
1693 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1694 	} else {
1695 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1696 	}
1697 	n->m_len = remain;
1698 	m->m_len = len;
1699 	n->m_next = m->m_next;
1700 	m->m_next = 0;
1701 	return (n);
1702 }
1703 
1704 /*
1705  * Routine to copy from device local memory into mbufs.
1706  * Note: "offset" is ill-defined and always called as 0, so ignore it.
1707  */
1708 struct mbuf *
1709 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1710     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1711 {
1712 	struct mbuf *m, *mfirst = NULL, **mtail;
1713 	int nsize, flags;
1714 
1715 	if (copy == NULL)
1716 		copy = bcopy;
1717 	mtail = &mfirst;
1718 	flags = M_PKTHDR;
1719 
1720 	while (len > 0) {
1721 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1722 		if (m == NULL) {
1723 			m_freem(mfirst);
1724 			return (NULL);
1725 		}
1726 		m->m_len = min(len, nsize);
1727 
1728 		if (flags & M_PKTHDR) {
1729 			if (len + max_linkhdr <= nsize)
1730 				m->m_data += max_linkhdr;
1731 			m->m_pkthdr.rcvif = ifp;
1732 			m->m_pkthdr.len = len;
1733 			flags = 0;
1734 		}
1735 
1736 		copy(buf, m->m_data, (unsigned)m->m_len);
1737 		buf += m->m_len;
1738 		len -= m->m_len;
1739 		*mtail = m;
1740 		mtail = &m->m_next;
1741 	}
1742 
1743 	return (mfirst);
1744 }
1745 
1746 /*
1747  * Routine to pad mbuf to the specified length 'padto'.
1748  */
1749 int
1750 m_devpad(struct mbuf *m, int padto)
1751 {
1752 	struct mbuf *last = NULL;
1753 	int padlen;
1754 
1755 	if (padto <= m->m_pkthdr.len)
1756 		return 0;
1757 
1758 	padlen = padto - m->m_pkthdr.len;
1759 
1760 	/* if there's only the packet-header and we can pad there, use it. */
1761 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1762 		last = m;
1763 	} else {
1764 		/*
1765 		 * Walk packet chain to find last mbuf. We will either
1766 		 * pad there, or append a new mbuf and pad it.
1767 		 */
1768 		for (last = m; last->m_next != NULL; last = last->m_next)
1769 			; /* EMPTY */
1770 
1771 		/* `last' now points to last in chain. */
1772 		if (M_TRAILINGSPACE(last) < padlen) {
1773 			struct mbuf *n;
1774 
1775 			/* Allocate new empty mbuf, pad it.  Compact later. */
1776 			MGET(n, MB_DONTWAIT, MT_DATA);
1777 			if (n == NULL)
1778 				return ENOBUFS;
1779 			n->m_len = 0;
1780 			last->m_next = n;
1781 			last = n;
1782 		}
1783 	}
1784 	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1785 	KKASSERT(M_WRITABLE(last));
1786 
1787 	/* Now zero the pad area */
1788 	bzero(mtod(last, char *) + last->m_len, padlen);
1789 	last->m_len += padlen;
1790 	m->m_pkthdr.len += padlen;
1791 	return 0;
1792 }
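
/*
 * Example (illustrative): an ethernet driver padding runt frames to the
 * minimum wire size before transmission:
 *
 *	if (m->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN &&
 *	    m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN)) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */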
1793 
1794 /*
1795  * Copy data from a buffer back into the indicated mbuf chain,
1796  * starting "off" bytes from the beginning, extending the mbuf
1797  * chain if necessary.
1798  */
1799 void
1800 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1801 {
1802 	int mlen;
1803 	struct mbuf *m = m0, *n;
1804 	int totlen = 0;
1805 
1806 	if (m0 == NULL)
1807 		return;
1808 	while (off > (mlen = m->m_len)) {
1809 		off -= mlen;
1810 		totlen += mlen;
1811 		if (m->m_next == NULL) {
1812 			n = m_getclr(MB_DONTWAIT, m->m_type);
1813 			if (n == NULL)
1814 				goto out;
1815 			n->m_len = min(MLEN, len + off);
1816 			m->m_next = n;
1817 		}
1818 		m = m->m_next;
1819 	}
1820 	while (len > 0) {
1821 		mlen = min(m->m_len - off, len);
1822 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1823 		cp += mlen;
1824 		len -= mlen;
1825 		mlen += off;
1826 		off = 0;
1827 		totlen += mlen;
1828 		if (len == 0)
1829 			break;
1830 		if (m->m_next == NULL) {
1831 			n = m_get(MB_DONTWAIT, m->m_type);
1832 			if (n == NULL)
1833 				break;
1834 			n->m_len = min(MLEN, len);
1835 			m->m_next = n;
1836 		}
1837 		m = m->m_next;
1838 	}
1839 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1840 		m->m_pkthdr.len = totlen;
1841 }
1842 
1843 /*
1844  * Append the specified data to the indicated mbuf chain.
1845  * Extend the mbuf chain if the new data does not fit in
1846  * existing space.
1847  *
1848  * Return 1 if able to complete the job; otherwise 0.
1849  */
1850 int
1851 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1852 {
1853 	struct mbuf *m, *n;
1854 	int remainder, space;
1855 
1856 	for (m = m0; m->m_next != NULL; m = m->m_next)
1857 		;
1858 	remainder = len;
1859 	space = M_TRAILINGSPACE(m);
1860 	if (space > 0) {
1861 		/*
1862 		 * Copy into available space.
1863 		 */
1864 		if (space > remainder)
1865 			space = remainder;
1866 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1867 		m->m_len += space;
1868 		cp += space, remainder -= space;
1869 	}
1870 	while (remainder > 0) {
1871 		/*
1872 		 * Allocate a new mbuf; could check space
1873 		 * and allocate a cluster instead.
1874 		 */
1875 		n = m_get(MB_DONTWAIT, m->m_type);
1876 		if (n == NULL)
1877 			break;
1878 		n->m_len = min(MLEN, remainder);
1879 		bcopy(cp, mtod(n, caddr_t), n->m_len);
1880 		cp += n->m_len, remainder -= n->m_len;
1881 		m->m_next = n;
1882 		m = n;
1883 	}
1884 	if (m0->m_flags & M_PKTHDR)
1885 		m0->m_pkthdr.len += len - remainder;
1886 	return (remainder == 0);
1887 }
1888 
1889 /*
1890  * Apply function f to the data in an mbuf chain starting "off" bytes from
1891  * the beginning, continuing for "len" bytes.
1892  */
1893 int
1894 m_apply(struct mbuf *m, int off, int len,
1895     int (*f)(void *, void *, u_int), void *arg)
1896 {
1897 	u_int count;
1898 	int rval;
1899 
1900 	KASSERT(off >= 0, ("m_apply, negative off %d", off));
1901 	KASSERT(len >= 0, ("m_apply, negative len %d", len));
1902 	while (off > 0) {
1903 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1904 		if (off < m->m_len)
1905 			break;
1906 		off -= m->m_len;
1907 		m = m->m_next;
1908 	}
1909 	while (len > 0) {
1910 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1911 		count = min(m->m_len - off, len);
1912 		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1913 		if (rval)
1914 			return (rval);
1915 		len -= count;
1916 		off = 0;
1917 		m = m->m_next;
1918 	}
1919 	return (0);
1920 }
1921 
1922 /*
1923  * Return a pointer to mbuf/offset of location in mbuf chain.
1924  */
1925 struct mbuf *
1926 m_getptr(struct mbuf *m, int loc, int *off)
1927 {
1928 
1929 	while (loc >= 0) {
1930 		/* Normal end of search. */
1931 		if (m->m_len > loc) {
1932 			*off = loc;
1933 			return (m);
1934 		} else {
1935 			loc -= m->m_len;
1936 			if (m->m_next == NULL) {
1937 				if (loc == 0) {
1938 					/* Point at the end of valid data. */
1939 					*off = m->m_len;
1940 					return (m);
1941 				}
1942 				return (NULL);
1943 			}
1944 			m = m->m_next;
1945 		}
1946 	}
1947 	return (NULL);
1948 }
1949 
1950 void
1951 m_print(const struct mbuf *m)
1952 {
1953 	int len;
1954 	const struct mbuf *m2;
1955 
1956 	len = m->m_pkthdr.len;
1957 	m2 = m;
1958 	while (len) {
1959 		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1960 		len -= m2->m_len;
1961 		m2 = m2->m_next;
1962 	}
1963 	return;
1964 }
1965 
1966 /*
1967  * "Move" mbuf pkthdr from "from" to "to".
1968  * "from" must have M_PKTHDR set, and "to" must be empty.
1969  */
1970 void
1971 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1972 {
1973 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1974 
1975 	to->m_flags |= from->m_flags & M_COPYFLAGS;
1976 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
1977 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
1978 }
1979 
1980 /*
1981  * Duplicate "from"'s mbuf pkthdr in "to".
1982  * "from" must have M_PKTHDR set, and "to" must be empty.
1983  * In particular, this does a deep copy of the packet tags.
1984  */
1985 int
1986 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1987 {
1988 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1989 
1990 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
1991 		      (to->m_flags & ~M_COPYFLAGS);
1992 	to->m_pkthdr = from->m_pkthdr;
1993 	SLIST_INIT(&to->m_pkthdr.tags);
1994 	return (m_tag_copy_chain(to, from, how));
1995 }
1996 
1997 /*
1998  * Defragment an mbuf chain, returning the shortest possible
1999  * chain of mbufs and clusters.  If allocation fails and
2000  * this cannot be completed, NULL will be returned, but
2001  * the passed in chain will be unchanged.  Upon success,
2002  * the original chain will be freed, and the new chain
2003  * will be returned.
2004  *
2005  * If a non-packet header is passed in, the original
2006  * mbuf (chain?) will be returned unharmed.
2007  *
2008  * m_defrag_nofree doesn't free the passed in mbuf.
2009  */
2010 struct mbuf *
2011 m_defrag(struct mbuf *m0, int how)
2012 {
2013 	struct mbuf *m_new;
2014 
2015 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
2016 		return (NULL);
2017 	if (m_new != m0)
2018 		m_freem(m0);
2019 	return (m_new);
2020 }
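
/*
 * Typical use (illustrative; the bus_dma step is a hypothetical driver
 * detail): compacting an overly fragmented chain when the DMA engine
 * only supports a limited number of segments:
 *
 *	if (error == EFBIG) {		// too many segments for the map
 *		struct mbuf *tmp = m_defrag(m, MB_DONTWAIT);
 *
 *		if (tmp == NULL)
 *			goto drop;	// m is still intact on failure
 *		m = tmp;		// the original was freed by m_defrag()
 *		// ...retry the DMA load with the compacted chain...
 *	}
 */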
2021 
2022 struct mbuf *
2023 m_defrag_nofree(struct mbuf *m0, int how)
2024 {
2025 	struct mbuf	*m_new = NULL, *m_final = NULL;
2026 	int		progress = 0, length, nsize;
2027 
2028 	if (!(m0->m_flags & M_PKTHDR))
2029 		return (m0);
2030 
2031 #ifdef MBUF_STRESS_TEST
2032 	if (m_defragrandomfailures) {
2033 		int temp = karc4random() & 0xff;
2034 		if (temp == 0xba)
2035 			goto nospace;
2036 	}
2037 #endif
2038 
2039 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
2040 	if (m_final == NULL)
2041 		goto nospace;
2042 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
2043 
2044 	if (m_dup_pkthdr(m_final, m0, how) == 0)
2045 		goto nospace;
2046 
2047 	m_new = m_final;
2048 
2049 	while (progress < m0->m_pkthdr.len) {
2050 		length = m0->m_pkthdr.len - progress;
2051 		if (length > MCLBYTES)
2052 			length = MCLBYTES;
2053 
2054 		if (m_new == NULL) {
2055 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
2056 			if (m_new == NULL)
2057 				goto nospace;
2058 		}
2059 
2060 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2061 		progress += length;
2062 		m_new->m_len = length;
2063 		if (m_new != m_final)
2064 			m_cat(m_final, m_new);
2065 		m_new = NULL;
2066 	}
2067 	if (m0->m_next == NULL)
2068 		m_defraguseless++;
2069 	m_defragpackets++;
2070 	m_defragbytes += m_final->m_pkthdr.len;
2071 	return (m_final);
2072 nospace:
2073 	m_defragfailure++;
2074 	if (m_new)
2075 		m_free(m_new);
2076 	m_freem(m_final);
2077 	return (NULL);
2078 }
2079 
2080 /*
2081  * Move data from uio into mbufs.
2082  */
2083 struct mbuf *
2084 m_uiomove(struct uio *uio)
2085 {
2086 	struct mbuf *m;			/* current working mbuf */
2087 	struct mbuf *head = NULL;	/* result mbuf chain */
2088 	struct mbuf **mp = &head;
2089 	int flags = M_PKTHDR;
2090 	int nsize;
2091 	int error;
2092 	int resid;
2093 
2094 	do {
2095 		if (uio->uio_resid > INT_MAX)
2096 			resid = INT_MAX;
2097 		else
2098 			resid = (int)uio->uio_resid;
2099 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
2100 		if (flags) {
2101 			m->m_pkthdr.len = 0;
2102 			/* Leave room for protocol headers. */
2103 			if (resid < MHLEN)
2104 				MH_ALIGN(m, resid);
2105 			flags = 0;
2106 		}
2107 		m->m_len = imin(nsize, resid);
2108 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
2109 		if (error) {
2110 			m_free(m);
2111 			goto failed;
2112 		}
2113 		*mp = m;
2114 		mp = &m->m_next;
2115 		head->m_pkthdr.len += m->m_len;
2116 	} while (uio->uio_resid > 0);
2117 
2118 	return (head);
2119 
2120 failed:
2121 	m_freem(head);
2122 	return (NULL);
2123 }
2124 
2125 struct mbuf *
2126 m_last(struct mbuf *m)
2127 {
2128 	while (m->m_next)
2129 		m = m->m_next;
2130 	return (m);
2131 }
2132 
2133 /*
2134  * Return the number of bytes in an mbuf chain.
2135  * If lastm is not NULL, also return the last mbuf.
2136  */
2137 u_int
2138 m_lengthm(struct mbuf *m, struct mbuf **lastm)
2139 {
2140 	u_int len = 0;
2141 	struct mbuf *prev = m;
2142 
2143 	while (m) {
2144 		len += m->m_len;
2145 		prev = m;
2146 		m = m->m_next;
2147 	}
2148 	if (lastm != NULL)
2149 		*lastm = prev;
2150 	return (len);
2151 }
2152 
2153 /*
2154  * Like m_lengthm(), except also keep track of mbuf usage.
2155  */
2156 u_int
2157 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2158 {
2159 	u_int len = 0, mbcnt = 0;
2160 	struct mbuf *prev = m;
2161 
2162 	while (m) {
2163 		len += m->m_len;
2164 		mbcnt += MSIZE;
2165 		if (m->m_flags & M_EXT)
2166 			mbcnt += m->m_ext.ext_size;
2167 		prev = m;
2168 		m = m->m_next;
2169 	}
2170 	if (lastm != NULL)
2171 		*lastm = prev;
2172 	*pmbcnt = mbcnt;
2173 	return (len);
2174 }
2175