xref: /dragonfly/sys/kern/uipc_mbuf.c (revision 678e8cc6)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
5  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Jeffrey M. Hsu.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1982, 1986, 1988, 1991, 1993
38  *	The Regents of the University of California.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by the University of
51  *	California, Berkeley and its contributors.
52  * 4. Neither the name of the University nor the names of its contributors
53  *    may be used to endorse or promote products derived from this software
54  *    without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66  * SUCH DAMAGE.
67  *
68  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
69  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
70  */
71 
72 #include "opt_param.h"
73 #include "opt_mbuf_stress_test.h"
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/file.h>
77 #include <sys/malloc.h>
78 #include <sys/mbuf.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/domain.h>
82 #include <sys/objcache.h>
83 #include <sys/tree.h>
84 #include <sys/protosw.h>
85 #include <sys/uio.h>
86 #include <sys/thread.h>
87 #include <sys/globaldata.h>
88 
89 #include <sys/thread2.h>
90 #include <sys/spinlock2.h>
91 
92 #include <machine/atomic.h>
93 #include <machine/limits.h>
94 
95 #include <vm/vm.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_extern.h>
98 
99 #ifdef INVARIANTS
100 #include <machine/cpu.h>
101 #endif
102 
103 /*
104  * mbuf cluster meta-data
105  */
106 struct mbcluster {
107 	int32_t	mcl_refs;
108 	void	*mcl_data;
109 };
110 
111 /*
112  * mbuf tracking for debugging purposes
113  */
114 #ifdef MBUF_DEBUG
115 
116 static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
117 
118 struct mbtrack;
119 RB_HEAD(mbuf_rb_tree, mbtrack);
120 RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
121 
122 struct mbtrack {
123 	RB_ENTRY(mbtrack) rb_node;
124 	int trackid;
125 	struct mbuf *m;
126 };
127 
128 static int
129 mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
130 {
131 	if (mb1->m < mb2->m)
132 		return(-1);
133 	if (mb1->m > mb2->m)
134 		return(1);
135 	return(0);
136 }
137 
138 RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
139 
140 struct mbuf_rb_tree	mbuf_track_root;
141 static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
142 
143 static void
144 mbuftrack(struct mbuf *m)
145 {
146 	struct mbtrack *mbt;
147 
148 	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
149 	spin_lock(&mbuf_track_spin);
150 	mbt->m = m;
151 	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
152 		spin_unlock(&mbuf_track_spin);
153 		panic("mbuftrack: mbuf %p already being tracked\n", m);
154 	}
155 	spin_unlock(&mbuf_track_spin);
156 }
157 
158 static void
159 mbufuntrack(struct mbuf *m)
160 {
161 	struct mbtrack *mbt;
162 
163 	spin_lock(&mbuf_track_spin);
164 	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
165 	if (mbt == NULL) {
166 		spin_unlock(&mbuf_track_spin);
167 		panic("mbufuntrack: mbuf %p was not tracked\n", m);
168 	} else {
169 		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
170 		spin_unlock(&mbuf_track_spin);
171 		kfree(mbt, M_MTRACK);
172 	}
173 }
174 
175 void
176 mbuftrackid(struct mbuf *m, int trackid)
177 {
178 	struct mbtrack *mbt;
179 	struct mbuf *n;
180 
181 	spin_lock(&mbuf_track_spin);
182 	while (m) {
183 		n = m->m_nextpkt;
184 		while (m) {
185 			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
186 			if (mbt == NULL) {
187 				spin_unlock(&mbuf_track_spin);
188 				panic("mbuftrackid: mbuf %p not tracked", m);
189 			}
190 			mbt->trackid = trackid;
191 			m = m->m_next;
192 		}
193 		m = n;
194 	}
195 	spin_unlock(&mbuf_track_spin);
196 }
197 
198 static int
199 mbuftrack_callback(struct mbtrack *mbt, void *arg)
200 {
201 	struct sysctl_req *req = arg;
202 	char buf[64];
203 	int error;
204 
205 	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
206 
207 	spin_unlock(&mbuf_track_spin);
208 	error = SYSCTL_OUT(req, buf, strlen(buf));
209 	spin_lock(&mbuf_track_spin);
210 	if (error)
211 		return(-error);
212 	return(0);
213 }
214 
215 static int
216 mbuftrack_show(SYSCTL_HANDLER_ARGS)
217 {
218 	int error;
219 
220 	spin_lock(&mbuf_track_spin);
221 	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
222 				     mbuftrack_callback, req);
223 	spin_unlock(&mbuf_track_spin);
224 	return (-error);
225 }
226 SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
227 	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
228 
229 #else
230 
231 #define mbuftrack(m)
232 #define mbufuntrack(m)
233 
234 #endif
235 
236 static void mbinit(void *);
237 SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL);
238 
239 static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];
240 
241 static struct mbstat mbstat[SMP_MAXCPU];
242 int	max_linkhdr;
243 int	max_protohdr;
244 int	max_hdr;
245 int	max_datalen;
246 int	m_defragpackets;
247 int	m_defragbytes;
248 int	m_defraguseless;
249 int	m_defragfailure;
250 #ifdef MBUF_STRESS_TEST
251 int	m_defragrandomfailures;
252 #endif
253 
254 struct objcache *mbuf_cache, *mbufphdr_cache;
255 struct objcache *mclmeta_cache, *mjclmeta_cache;
256 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
257 struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;
258 
259 int	nmbclusters;
260 int	nmbufs;
261 
262 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
263 	&max_linkhdr, 0, "Max size of a link-level header");
264 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
265 	&max_protohdr, 0, "Max size of a protocol header");
266 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
267 	"Max size of link+protocol headers");
268 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
269 	&max_datalen, 0, "Max data payload size without headers");
270 SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
271 	&mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
272 static int do_mbstat(SYSCTL_HANDLER_ARGS);
273 
274 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
275 	0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");
276 
277 static int do_mbtypes(SYSCTL_HANDLER_ARGS);
278 
279 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
280 	0, 0, do_mbtypes, "LU", "");
281 
282 static int
283 do_mbstat(SYSCTL_HANDLER_ARGS)
284 {
285 	struct mbstat mbstat_total;
286 	struct mbstat *mbstat_totalp;
287 	int i;
288 
289 	bzero(&mbstat_total, sizeof(mbstat_total));
290 	mbstat_totalp = &mbstat_total;
291 
292 	for (i = 0; i < ncpus; i++)
293 	{
294 		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
295 		mbstat_total.m_clusters += mbstat[i].m_clusters;
296 		mbstat_total.m_spare += mbstat[i].m_spare;
297 		mbstat_total.m_clfree += mbstat[i].m_clfree;
298 		mbstat_total.m_drops += mbstat[i].m_drops;
299 		mbstat_total.m_wait += mbstat[i].m_wait;
300 		mbstat_total.m_drain += mbstat[i].m_drain;
301 		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
302 		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
303 
304 	}
305 	/*
306 	 * The following fields are not cumulative, so just
307 	 * read their values once from cpu 0.
308 	 */
309 	mbstat_total.m_msize = mbstat[0].m_msize;
310 	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
311 	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
312 	mbstat_total.m_mlen = mbstat[0].m_mlen;
313 	mbstat_total.m_mhlen = mbstat[0].m_mhlen;
314 
315 	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
316 }
317 
318 static int
319 do_mbtypes(SYSCTL_HANDLER_ARGS)
320 {
321 	u_long totals[MT_NTYPES];
322 	int i, j;
323 
324 	for (i = 0; i < MT_NTYPES; i++)
325 		totals[i] = 0;
326 
327 	for (i = 0; i < ncpus; i++)
328 	{
329 		for (j = 0; j < MT_NTYPES; j++)
330 			totals[j] += mbtypes[i][j];
331 	}
332 
333 	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
334 }
335 
336 /*
337  * These are read-only because we do not currently have any code
338  * to adjust the objcache limits after the fact.  The variables
339  * may only be set as boot-time tunables.
340  */
341 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
342 	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
343 SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
344 	   "Maximum number of mbufs available");
345 
346 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
347 	   &m_defragpackets, 0, "Number of defragment packets");
348 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
349 	   &m_defragbytes, 0, "Number of defragment bytes");
350 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
351 	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
352 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
353 	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
354 #ifdef MBUF_STRESS_TEST
355 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
356 	   &m_defragrandomfailures, 0, "");
357 #endif
358 
359 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
360 static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
361 static MALLOC_DEFINE(M_MJBUFCL, "mjbufcl", "mjbufcl");
362 static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
363 static MALLOC_DEFINE(M_MJCLMETA, "mjclmeta", "mjclmeta");
364 
365 static void m_reclaim (void);
366 static void m_mclref(void *arg);
367 static void m_mclfree(void *arg);
368 
369 /*
370  * NOTE: Default NMBUFS must take into account a possible DOS attack
371  *	 using fd passing on unix domain sockets.
372  */
373 #ifndef NMBCLUSTERS
374 #define NMBCLUSTERS	(512 + maxusers * 16)
375 #endif
376 #ifndef NMBUFS
377 #define NMBUFS		(nmbclusters * 2 + maxfiles)
378 #endif
379 
380 /*
381  * Perform sanity checks of tunables declared above.
382  */
383 static void
384 tunable_mbinit(void *dummy)
385 {
386 	/*
387 	 * This has to be done before VM init.
388 	 */
389 	nmbclusters = NMBCLUSTERS;
390 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
391 	nmbufs = NMBUFS;
392 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
393 	/* Sanity checks */
394 	if (nmbufs < nmbclusters * 2)
395 		nmbufs = nmbclusters * 2;
396 }
397 SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
398 	tunable_mbinit, NULL);
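
/*
 * Illustrative sketch, not part of the original source: because the
 * limits above are fetched with TUNABLE_INT_FETCH() before VM init and
 * the corresponding sysctls below are read-only, they would normally
 * be overridden from /boot/loader.conf.  The values shown are assumed,
 * for illustration only:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * Note that the sanity check above still raises nmbufs to at least
 * nmbclusters * 2.
 */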
399 
400 /* "number of clusters of pages" */
401 #define NCL_INIT	1
402 
403 #define NMB_INIT	16
404 
405 /*
406  * The mbuf object cache only guarantees that m_next and m_nextpkt are
407  * NULL and that m_data points to the beginning of the data area.  In
408  * particular, m_len and m_pkthdr.len are uninitialized.  It is the
409  * responsibility of the caller to initialize those fields before use.
410  */
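
/*
 * Illustrative sketch, not part of the original source: a consumer
 * going straight to the object cache therefore has to finish the
 * initialization itself (M_WAITOK is the standard kmalloc wait flag):
 *
 *	struct mbuf *m;
 *
 *	m = objcache_get(mbuf_cache, M_WAITOK);
 *	if (m != NULL) {
 *		m->m_len = 0;
 *		m->m_type = MT_DATA;
 *	}
 *
 * The m_get()/m_gethdr() wrappers below do exactly this and also
 * update the per-cpu statistics.
 */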
411 
412 static __inline boolean_t
413 mbuf_ctor(void *obj, void *private, int ocflags)
414 {
415 	struct mbuf *m = obj;
416 
417 	m->m_next = NULL;
418 	m->m_nextpkt = NULL;
419 	m->m_data = m->m_dat;
420 	m->m_flags = 0;
421 
422 	return (TRUE);
423 }
424 
425 /*
426  * Initialize the mbuf and the packet header fields.
427  */
428 static boolean_t
429 mbufphdr_ctor(void *obj, void *private, int ocflags)
430 {
431 	struct mbuf *m = obj;
432 
433 	m->m_next = NULL;
434 	m->m_nextpkt = NULL;
435 	m->m_data = m->m_pktdat;
436 	m->m_flags = M_PKTHDR | M_PHCACHE;
437 
438 	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
439 	SLIST_INIT(&m->m_pkthdr.tags);
440 	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
441 	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
442 
443 	return (TRUE);
444 }
445 
446 /*
447  * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
448  */
449 static boolean_t
450 mclmeta_ctor(void *obj, void *private, int ocflags)
451 {
452 	struct mbcluster *cl = obj;
453 	void *buf;
454 
455 	if (ocflags & M_NOWAIT)
456 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
457 	else
458 		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
459 	if (buf == NULL)
460 		return (FALSE);
461 	cl->mcl_refs = 0;
462 	cl->mcl_data = buf;
463 	return (TRUE);
464 }
465 
466 static boolean_t
467 mjclmeta_ctor(void *obj, void *private, int ocflags)
468 {
469 	struct mbcluster *cl = obj;
470 	void *buf;
471 
472 	if (ocflags & M_NOWAIT)
473 		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
474 	else
475 		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
476 	if (buf == NULL)
477 		return (FALSE);
478 	cl->mcl_refs = 0;
479 	cl->mcl_data = buf;
480 	return (TRUE);
481 }
482 
483 static void
484 mclmeta_dtor(void *obj, void *private)
485 {
486 	struct mbcluster *mcl = obj;
487 
488 	KKASSERT(mcl->mcl_refs == 0);
489 	kfree(mcl->mcl_data, M_MBUFCL);
490 }
491 
492 static void
493 linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
494 {
495 	/*
496 	 * Add the cluster to the mbuf.  The caller will detect that the
497 	 * mbuf now has an attached cluster.
498 	 */
499 	m->m_ext.ext_arg = cl;
500 	m->m_ext.ext_buf = cl->mcl_data;
501 	m->m_ext.ext_ref = m_mclref;
502 	m->m_ext.ext_free = m_mclfree;
503 	m->m_ext.ext_size = size;
504 	atomic_add_int(&cl->mcl_refs, 1);
505 
506 	m->m_data = m->m_ext.ext_buf;
507 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
508 }
509 
510 static void
511 linkcluster(struct mbuf *m, struct mbcluster *cl)
512 {
513 	linkjcluster(m, cl, MCLBYTES);
514 }
515 
516 static boolean_t
517 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
518 {
519 	struct mbuf *m = obj;
520 	struct mbcluster *cl;
521 
522 	mbufphdr_ctor(obj, private, ocflags);
523 	cl = objcache_get(mclmeta_cache, ocflags);
524 	if (cl == NULL) {
525 		++mbstat[mycpu->gd_cpuid].m_drops;
526 		return (FALSE);
527 	}
528 	m->m_flags |= M_CLCACHE;
529 	linkcluster(m, cl);
530 	return (TRUE);
531 }
532 
533 static boolean_t
534 mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
535 {
536 	struct mbuf *m = obj;
537 	struct mbcluster *cl;
538 
539 	mbufphdr_ctor(obj, private, ocflags);
540 	cl = objcache_get(mjclmeta_cache, ocflags);
541 	if (cl == NULL) {
542 		++mbstat[mycpu->gd_cpuid].m_drops;
543 		return (FALSE);
544 	}
545 	m->m_flags |= M_CLCACHE;
546 	linkjcluster(m, cl, MJUMPAGESIZE);
547 	return (TRUE);
548 }
549 
550 static boolean_t
551 mbufcluster_ctor(void *obj, void *private, int ocflags)
552 {
553 	struct mbuf *m = obj;
554 	struct mbcluster *cl;
555 
556 	mbuf_ctor(obj, private, ocflags);
557 	cl = objcache_get(mclmeta_cache, ocflags);
558 	if (cl == NULL) {
559 		++mbstat[mycpu->gd_cpuid].m_drops;
560 		return (FALSE);
561 	}
562 	m->m_flags |= M_CLCACHE;
563 	linkcluster(m, cl);
564 	return (TRUE);
565 }
566 
567 static boolean_t
568 mbufjcluster_ctor(void *obj, void *private, int ocflags)
569 {
570 	struct mbuf *m = obj;
571 	struct mbcluster *cl;
572 
573 	mbuf_ctor(obj, private, ocflags);
574 	cl = objcache_get(mjclmeta_cache, ocflags);
575 	if (cl == NULL) {
576 		++mbstat[mycpu->gd_cpuid].m_drops;
577 		return (FALSE);
578 	}
579 	m->m_flags |= M_CLCACHE;
580 	linkjcluster(m, cl, MJUMPAGESIZE);
581 	return (TRUE);
582 }
583 
584 /*
585  * Used for both the cluster and cluster PHDR caches.
586  *
587  * The mbuf may have lost its cluster due to sharing; deal
588  * with the situation by checking M_EXT.
589  */
590 static void
591 mbufcluster_dtor(void *obj, void *private)
592 {
593 	struct mbuf *m = obj;
594 	struct mbcluster *mcl;
595 
596 	if (m->m_flags & M_EXT) {
597 		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
598 		mcl = m->m_ext.ext_arg;
599 		KKASSERT(mcl->mcl_refs == 1);
600 		mcl->mcl_refs = 0;
601 		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
602 			objcache_put(mjclmeta_cache, mcl);
603 		else
604 			objcache_put(mclmeta_cache, mcl);
605 	}
606 }
607 
608 struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
609 struct objcache_malloc_args mclmeta_malloc_args =
610 	{ sizeof(struct mbcluster), M_MCLMETA };
611 
612 /* ARGSUSED*/
613 static void
614 mbinit(void *dummy)
615 {
616 	int mb_limit, cl_limit;
617 	int limit;
618 	int i;
619 
620 	/*
621 	 * Initialize statistics
622 	 */
623 	for (i = 0; i < ncpus; i++) {
624 		mbstat[i].m_msize = MSIZE;
625 		mbstat[i].m_mclbytes = MCLBYTES;
626 		mbstat[i].m_mjumpagesize = MJUMPAGESIZE;
627 		mbstat[i].m_minclsize = MINCLSIZE;
628 		mbstat[i].m_mlen = MLEN;
629 		mbstat[i].m_mhlen = MHLEN;
630 	}
631 
632 	/*
633  * Create object caches and save cluster limits, which will
634 	 * be used to adjust backing kmalloc pools' limit later.
635 	 */
636 
637 	mb_limit = cl_limit = 0;
638 
639 	limit = nmbufs;
640 	mbuf_cache = objcache_create("mbuf",
641 	    &limit, 0,
642 	    mbuf_ctor, NULL, NULL,
643 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
644 	mb_limit += limit;
645 
646 	limit = nmbufs;
647 	mbufphdr_cache = objcache_create("mbuf pkt hdr",
648 	    &limit, nmbufs / 4,
649 	    mbufphdr_ctor, NULL, NULL,
650 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
651 	mb_limit += limit;
652 
653 	cl_limit = nmbclusters;
654 	mclmeta_cache = objcache_create("cluster mbuf",
655 	    &cl_limit, 0,
656 	    mclmeta_ctor, mclmeta_dtor, NULL,
657 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
658 
659 	cl_limit = nmbclusters;
660 	mjclmeta_cache = objcache_create("jcluster mbuf",
661 	    &cl_limit, 0,
662 	    mjclmeta_ctor, mclmeta_dtor, NULL,
663 	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
664 
665 	limit = nmbclusters;
666 	mbufcluster_cache = objcache_create("mbuf + cluster",
667 	    &limit, 0,
668 	    mbufcluster_ctor, mbufcluster_dtor, NULL,
669 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
670 	mb_limit += limit;
671 
672 	limit = nmbclusters;
673 	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
674 	    &limit, nmbclusters / 16,
675 	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
676 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
677 	mb_limit += limit;
678 
679 	limit = nmbclusters;
680 	mbufjcluster_cache = objcache_create("mbuf + jcluster",
681 	    &limit, 0,
682 	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
683 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
684 
685 	limit = nmbclusters;
686 	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
687 	    &limit, nmbclusters / 16,
688 	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
689 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
690 
691 	/*
692 	 * Adjust backing kmalloc pools' limit
693 	 *
694 	 * NOTE: We raise the limit by another 1/8 to take the effect
695 	 * of loosememuse into account.
696 	 */
697 	cl_limit += cl_limit / 8;
698 	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
699 	    mclmeta_malloc_args.objsize * (size_t)cl_limit);
700 	kmalloc_raise_limit(M_MBUFCL,
701 	    ((MCLBYTES * (size_t)cl_limit * 3) / 4) +
702 	    ((MJUMPAGESIZE * (size_t)cl_limit) / 4));
703 
704 	mb_limit += mb_limit / 8;
705 	kmalloc_raise_limit(mbuf_malloc_args.mtype,
706 	    mbuf_malloc_args.objsize * (size_t)mb_limit);
707 }
708 
709 /*
710  * Return the number of references to this mbuf's data.  0 is returned
711  * if the mbuf is not M_EXT, a reference count is returned if it is
712  * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
713  */
714 int
715 m_sharecount(struct mbuf *m)
716 {
717 	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
718 	case 0:
719 		return (0);
720 	case M_EXT:
721 		return (99);
722 	case M_EXT | M_EXT_CLUSTER:
723 		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
724 	}
725 	/* NOTREACHED */
726 	return (0);		/* to shut up compiler */
727 }
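
/*
 * Illustrative usage, not part of the original source: a caller that
 * wants to modify cluster-backed data in place can use the share count
 * to verify exclusivity, e.g.:
 *
 *	if ((m->m_flags & M_EXT) == 0 || m_sharecount(m) == 1)
 *		... safe to write to m->m_data ...
 *
 * The sentinel 99 returned for non-cluster M_EXT storage effectively
 * means "assume shared".
 */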
728 
729 /*
730  * change mbuf to new type
731  */
732 void
733 m_chtype(struct mbuf *m, int type)
734 {
735 	struct globaldata *gd = mycpu;
736 
737 	++mbtypes[gd->gd_cpuid][type];
738 	--mbtypes[gd->gd_cpuid][m->m_type];
739 	m->m_type = type;
740 }
741 
742 static void
743 m_reclaim(void)
744 {
745 	struct domain *dp;
746 	struct protosw *pr;
747 
748 	kprintf("Debug: m_reclaim() called\n");
749 
750 	SLIST_FOREACH(dp, &domains, dom_next) {
751 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
752 			if (pr->pr_drain)
753 				(*pr->pr_drain)();
754 		}
755 	}
756 	++mbstat[mycpu->gd_cpuid].m_drain;
757 }
758 
759 static __inline void
760 updatestats(struct mbuf *m, int type)
761 {
762 	struct globaldata *gd = mycpu;
763 
764 	m->m_type = type;
765 	mbuftrack(m);
766 #ifdef MBUF_DEBUG
767 	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
768 	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
769 #endif
770 
771 	++mbtypes[gd->gd_cpuid][type];
772 	++mbstat[gd->gd_cpuid].m_mbufs;
773 
774 }
775 
776 /*
777  * Allocate an mbuf.
778  */
779 struct mbuf *
780 m_get(int how, int type)
781 {
782 	struct mbuf *m;
783 	int ntries = 0;
784 	int ocf = MBTOM(how);
785 
786 retryonce:
787 
788 	m = objcache_get(mbuf_cache, ocf);
789 
790 	if (m == NULL) {
791 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
792 			struct objcache *reclaimlist[] = {
793 				mbufphdr_cache,
794 				mbufcluster_cache,
795 				mbufphdrcluster_cache,
796 				mbufjcluster_cache,
797 				mbufphdrjcluster_cache
798 			};
799 			const int nreclaims = NELEM(reclaimlist);
800 
801 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
802 				m_reclaim();
803 			goto retryonce;
804 		}
805 		++mbstat[mycpu->gd_cpuid].m_drops;
806 		return (NULL);
807 	}
808 #ifdef MBUF_DEBUG
809 	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
810 #endif
811 	m->m_len = 0;
812 
813 	updatestats(m, type);
814 	return (m);
815 }
816 
817 struct mbuf *
818 m_gethdr(int how, int type)
819 {
820 	struct mbuf *m;
821 	int ocf = MBTOM(how);
822 	int ntries = 0;
823 
824 retryonce:
825 
826 	m = objcache_get(mbufphdr_cache, ocf);
827 
828 	if (m == NULL) {
829 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
830 			struct objcache *reclaimlist[] = {
831 				mbuf_cache,
832 				mbufcluster_cache, mbufphdrcluster_cache,
833 				mbufjcluster_cache, mbufphdrjcluster_cache
834 			};
835 			const int nreclaims = NELEM(reclaimlist);
836 
837 			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
838 				m_reclaim();
839 			goto retryonce;
840 		}
841 		++mbstat[mycpu->gd_cpuid].m_drops;
842 		return (NULL);
843 	}
844 #ifdef MBUF_DEBUG
845 	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
846 #endif
847 	m->m_len = 0;
848 	m->m_pkthdr.len = 0;
849 
850 	updatestats(m, type);
851 	return (m);
852 }
853 
854 /*
855  * Get an mbuf (not an mbuf cluster!) and zero it.
856  * Deprecated.
857  */
858 struct mbuf *
859 m_getclr(int how, int type)
860 {
861 	struct mbuf *m;
862 
863 	m = m_get(how, type);
864 	if (m != NULL)
865 		bzero(m->m_data, MLEN);
866 	return (m);
867 }
868 
869 struct mbuf *
870 m_getjcl(int how, short type, int flags, size_t size)
871 {
872 	struct mbuf *m = NULL;
873 	struct objcache *mbclc, *mbphclc;
874 	int ocflags = MBTOM(how);
875 	int ntries = 0;
876 
877 	switch (size) {
878 	case MCLBYTES:
879 		mbclc = mbufcluster_cache;
880 		mbphclc = mbufphdrcluster_cache;
881 		break;
882 	default:
883 		mbclc = mbufjcluster_cache;
884 		mbphclc = mbufphdrjcluster_cache;
885 		break;
886 	}
887 
888 retryonce:
889 
890 	if (flags & M_PKTHDR)
891 		m = objcache_get(mbphclc, ocflags);
892 	else
893 		m = objcache_get(mbclc, ocflags);
894 
895 	if (m == NULL) {
896 		if ((how & MB_TRYWAIT) && ntries++ == 0) {
897 			struct objcache *reclaimlist[1];
898 
899 			if (flags & M_PKTHDR)
900 				reclaimlist[0] = mbclc;
901 			else
902 				reclaimlist[0] = mbphclc;
903 			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
904 				m_reclaim();
905 			goto retryonce;
906 		}
907 		++mbstat[mycpu->gd_cpuid].m_drops;
908 		return (NULL);
909 	}
910 
911 #ifdef MBUF_DEBUG
912 	KASSERT(m->m_data == m->m_ext.ext_buf,
913 		("mbuf %p: bad m_data in get", m));
914 #endif
915 	m->m_type = type;
916 	m->m_len = 0;
917 	m->m_pkthdr.len = 0;	/* just do it unconditionally */
918 
919 	mbuftrack(m);
920 
921 	++mbtypes[mycpu->gd_cpuid][type];
922 	++mbstat[mycpu->gd_cpuid].m_clusters;
923 	return (m);
924 }
925 
926 /*
927  * Returns an mbuf with an attached cluster.
928  * Because many network drivers use this kind of buffer heavily, it is
929  * convenient to keep a small pool of free buffers of this kind.
930  * Even a small size such as 10 gives about 10% improvement in the
931  * forwarding rate in a bridge or router.
932  */
933 struct mbuf *
934 m_getcl(int how, short type, int flags)
935 {
936 	return (m_getjcl(how, type, flags, MCLBYTES));
937 }
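
/*
 * Illustrative usage, assuming a driver receive path (not part of the
 * original source):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *
 * m_getcl() hands back an mbuf with the cluster already attached, so
 * the caller only needs to set the length fields.
 */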
938 
939 /*
940  * Allocate chain of requested length.
941  */
942 struct mbuf *
943 m_getc(int len, int how, int type)
944 {
945 	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
946 	int nsize;
947 
948 	while (len > 0) {
949 		n = m_getl(len, how, type, 0, &nsize);
950 		if (n == NULL)
951 			goto failed;
952 		n->m_len = 0;
953 		*ntail = n;
954 		ntail = &n->m_next;
955 		len -= nsize;
956 	}
957 	return (nfirst);
958 
959 failed:
960 	m_freem(nfirst);
961 	return (NULL);
962 }
963 
964 /*
965  * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
966  * and return a pointer to the head of the allocated chain. If m0 is
967  * non-null, then we assume that it is a single mbuf or an mbuf chain to
968  * which we want len bytes worth of mbufs and/or clusters attached, and so
969  * if we succeed in allocating it, we will just return a pointer to m0.
970  *
971  * If we happen to fail at any point during the allocation, we will free
972  * up everything we have already allocated and return NULL.
973  *
974  * Deprecated.  Use m_getc() and m_cat() instead.
975  */
976 struct mbuf *
977 m_getm(struct mbuf *m0, int len, int type, int how)
978 {
979 	struct mbuf *nfirst;
980 
981 	nfirst = m_getc(len, how, type);
982 
983 	if (m0 != NULL) {
984 		m_last(m0)->m_next = nfirst;
985 		return (m0);
986 	}
987 
988 	return (nfirst);
989 }
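
/*
 * Illustrative sketch of the suggested replacement, not part of the
 * original source: allocate the extension chain with m_getc(), fill
 * it, then append it with m_cat():
 *
 *	struct mbuf *n;
 *
 *	n = m_getc(len, how, type);
 *	if (n == NULL)
 *		return (NULL);
 *	... copy data into the new chain, setting m_len on each mbuf ...
 *	m_cat(m0, n);
 */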
990 
991 /*
992  * Adds a cluster to a normal mbuf, M_EXT is set on success.
993  * Deprecated.  Use m_getcl() instead.
994  */
995 void
996 m_mclget(struct mbuf *m, int how)
997 {
998 	struct mbcluster *mcl;
999 
1000 	KKASSERT((m->m_flags & M_EXT) == 0);
1001 	mcl = objcache_get(mclmeta_cache, MBTOM(how));
1002 	if (mcl != NULL) {
1003 		linkcluster(m, mcl);
1004 		++mbstat[mycpu->gd_cpuid].m_clusters;
1005 	} else {
1006 		++mbstat[mycpu->gd_cpuid].m_drops;
1007 	}
1008 }
1009 
1010 /*
1011  * Updates to mbcluster must be MPSAFE.  Only an entity which already has
1012  * a reference to the cluster can ref it, so we are in no danger of
1013  * racing an add with a subtract.  But the operation must still be atomic
1014  * since multiple entities may have a reference on the cluster.
1015  *
1016  * m_mclfree() is almost the same but it must contend with two entities
1017  * freeing the cluster at the same time.
1018  */
1019 static void
1020 m_mclref(void *arg)
1021 {
1022 	struct mbcluster *mcl = arg;
1023 
1024 	atomic_add_int(&mcl->mcl_refs, 1);
1025 }
1026 
1027 /*
1028  * When dereferencing a cluster we have to deal with a N->0 race, where
1029  * N entities free their references simultaneously.  To do this we use
1030  * atomic_fetchadd_int().
1031  */
1032 static void
1033 m_mclfree(void *arg)
1034 {
1035 	struct mbcluster *mcl = arg;
1036 
1037 	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
1038 		--mbstat[mycpu->gd_cpuid].m_clusters;
1039 		objcache_put(mclmeta_cache, mcl);
1040 	}
1041 }
1042 
1043 /*
1044  * Free a single mbuf and any associated external storage.  The successor,
1045  * if any, is returned.
1046  *
1047  * We do need to check non-first mbufs for m_aux, since some existing
1048  * code does not call M_PREPEND properly
1049  * (example: calls to bpf_mtap from drivers).
1050  */
1051 
1052 #ifdef MBUF_DEBUG
1053 
1054 struct mbuf *
1055 _m_free(struct mbuf *m, const char *func)
1056 
1057 #else
1058 
1059 struct mbuf *
1060 m_free(struct mbuf *m)
1061 
1062 #endif
1063 {
1064 	struct mbuf *n;
1065 	struct globaldata *gd = mycpu;
1066 
1067 	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
1068 	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
1069 	--mbtypes[gd->gd_cpuid][m->m_type];
1070 
1071 	n = m->m_next;
1072 
1073 	/*
1074 	 * Make sure the mbuf is in constructed state before returning it
1075 	 * to the objcache.
1076 	 */
1077 	m->m_next = NULL;
1078 	mbufuntrack(m);
1079 #ifdef MBUF_DEBUG
1080 	m->m_hdr.mh_lastfunc = func;
1081 #endif
1082 #ifdef notyet
1083 	KKASSERT(m->m_nextpkt == NULL);
1084 #else
1085 	if (m->m_nextpkt != NULL) {
1086 		static int afewtimes = 10;
1087 
1088 		if (afewtimes-- > 0) {
1089 			kprintf("mfree: m->m_nextpkt != NULL\n");
1090 			print_backtrace(-1);
1091 		}
1092 		m->m_nextpkt = NULL;
1093 	}
1094 #endif
1095 	if (m->m_flags & M_PKTHDR) {
1096 		m_tag_delete_chain(m);		/* eliminate XXX JH */
1097 	}
1098 
1099 	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
1100 
1101 	/*
1102 	 * Clean the M_PKTHDR state so we can return the mbuf to its original
1103 	 * cache.  This is based on the PHCACHE flag which tells us whether
1104 	 * the mbuf was originally allocated out of a packet-header cache
1105 	 * or a non-packet-header cache.
1106 	 */
1107 	if (m->m_flags & M_PHCACHE) {
1108 		m->m_flags |= M_PKTHDR;
1109 		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
1110 		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
1111 		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
1112 		SLIST_INIT(&m->m_pkthdr.tags);
1113 	}
1114 
1115 	/*
1116 	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
1117 	 * the mbuf was originally allocated from a cluster cache or not,
1118 	 * and is totally separate from whether the mbuf is currently
1119 	 * associated with a cluster.
1120 	 */
1121 	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
1122 	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
1123 		/*
1124 		 * mbuf+cluster cache case.  The mbuf was allocated from the
1125 		 * combined mbuf_cluster cache and can be returned to the
1126 		 * cache if the cluster hasn't been shared.
1127 		 */
1128 		if (m_sharecount(m) == 1) {
1129 			/*
1130 			 * The cluster has not been shared, we can just
1131 			 * reset the data pointer and return the mbuf
1132 			 * to the cluster cache.  Note that the reference
1133 			 * count is left intact (it is still associated with
1134 			 * an mbuf).
1135 			 */
1136 			m->m_data = m->m_ext.ext_buf;
1137 			if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES) {
1138 				if (m->m_flags & M_PHCACHE)
1139 					objcache_put(mbufphdrjcluster_cache, m);
1140 				else
1141 					objcache_put(mbufjcluster_cache, m);
1142 			} else {
1143 				if (m->m_flags & M_PHCACHE)
1144 					objcache_put(mbufphdrcluster_cache, m);
1145 				else
1146 					objcache_put(mbufcluster_cache, m);
1147 			}
1148 			--mbstat[mycpu->gd_cpuid].m_clusters;
1149 		} else {
1150 			/*
1151 			 * Hell.  Someone else has a ref on this cluster,
1152 			 * we have to disconnect it which means we can't
1153 			 * put it back into the mbufcluster_cache, we
1154 			 * have to destroy the mbuf.
1155 			 *
1156 			 * Other mbuf references to the cluster will typically
1157 			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1158 			 *
1159 			 * XXX we could try to connect another cluster to
1160 			 * it.
1161 			 */
1162 			m->m_ext.ext_free(m->m_ext.ext_arg);
1163 			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1164 			if (m->m_ext.ext_size == MCLBYTES) {
1165 				if (m->m_flags & M_PHCACHE)
1166 					objcache_dtor(mbufphdrcluster_cache, m);
1167 				else
1168 					objcache_dtor(mbufcluster_cache, m);
1169 			} else {
1170 				if (m->m_flags & M_PHCACHE)
1171 					objcache_dtor(mbufphdrjcluster_cache, m);
1172 				else
1173 					objcache_dtor(mbufjcluster_cache, m);
1174 			}
1175 		}
1176 		break;
1177 	case M_EXT | M_EXT_CLUSTER:
1178 	case M_EXT:
1179 		/*
1180 		 * Normal cluster association case, disconnect the cluster from
1181 		 * the mbuf.  The cluster may or may not be custom.
1182 		 */
1183 		m->m_ext.ext_free(m->m_ext.ext_arg);
1184 		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1185 		/* fall through */
1186 	case 0:
1187 		/*
1188 		 * return the mbuf to the mbuf cache.
1189 		 */
1190 		if (m->m_flags & M_PHCACHE) {
1191 			m->m_data = m->m_pktdat;
1192 			objcache_put(mbufphdr_cache, m);
1193 		} else {
1194 			m->m_data = m->m_dat;
1195 			objcache_put(mbuf_cache, m);
1196 		}
1197 		--mbstat[mycpu->gd_cpuid].m_mbufs;
1198 		break;
1199 	default:
1200 		if (!panicstr)
1201 			panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1202 		break;
1203 	}
1204 	return (n);
1205 }
1206 
1207 #ifdef MBUF_DEBUG
1208 
1209 void
1210 _m_freem(struct mbuf *m, const char *func)
1211 {
1212 	while (m)
1213 		m = _m_free(m, func);
1214 }
1215 
1216 #else
1217 
1218 void
1219 m_freem(struct mbuf *m)
1220 {
1221 	while (m)
1222 		m = m_free(m);
1223 }
1224 
1225 #endif
1226 
1227 void
1228 m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
1229     void (*freef)(void *), void *arg)
1230 {
1231 	m->m_ext.ext_arg = arg;
1232 	m->m_ext.ext_buf = buf;
1233 	m->m_ext.ext_ref = reff;
1234 	m->m_ext.ext_free = freef;
1235 	m->m_ext.ext_size = size;
1236 	reff(arg);
1237 	m->m_data = buf;
1238 	m->m_flags |= M_EXT;
1239 }
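
/*
 * Illustrative usage, not part of the original source: attaching
 * driver-owned storage to an mbuf.  my_ref(), my_free(), dmabuf,
 * dmalen and sc are hypothetical names:
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m != NULL)
 *		m_extadd(m, dmabuf, dmalen, my_ref, my_free, sc);
 *
 * The callbacks are expected to maintain a reference count on the
 * external buffer; note that m_extadd() itself calls reff(arg) to
 * take the initial reference.
 */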
1240 
1241 /*
1242  * mbuf utility routines
1243  */
1244 
1245 /*
1246  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1247  * copy junk along.
1248  */
1249 struct mbuf *
1250 m_prepend(struct mbuf *m, int len, int how)
1251 {
1252 	struct mbuf *mn;
1253 
1254 	if (m->m_flags & M_PKTHDR)
1255 		mn = m_gethdr(how, m->m_type);
1256 	else
1257 		mn = m_get(how, m->m_type);
1258 	if (mn == NULL) {
1259 		m_freem(m);
1260 		return (NULL);
1261 	}
1262 	if (m->m_flags & M_PKTHDR)
1263 		M_MOVE_PKTHDR(mn, m);
1264 	mn->m_next = m;
1265 	m = mn;
1266 	if (len < MHLEN)
1267 		MH_ALIGN(m, len);
1268 	m->m_len = len;
1269 	return (m);
1270 }
1271 
1272 /*
1273  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1274  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
1275  * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from the caller.
1276  * Note that the copy is read-only, because clusters are not copied,
1277  * only their reference counts are incremented.
1278  */
1279 struct mbuf *
1280 m_copym(const struct mbuf *m, int off0, int len, int wait)
1281 {
1282 	struct mbuf *n, **np;
1283 	int off = off0;
1284 	struct mbuf *top;
1285 	int copyhdr = 0;
1286 
1287 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
1288 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
1289 	if (off == 0 && (m->m_flags & M_PKTHDR))
1290 		copyhdr = 1;
1291 	while (off > 0) {
1292 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1293 		if (off < m->m_len)
1294 			break;
1295 		off -= m->m_len;
1296 		m = m->m_next;
1297 	}
1298 	np = &top;
1299 	top = NULL;
1300 	while (len > 0) {
1301 		if (m == NULL) {
1302 			KASSERT(len == M_COPYALL,
1303 			    ("m_copym, length > size of mbuf chain"));
1304 			break;
1305 		}
1306 		/*
1307 		 * Because we are sharing any cluster attachment below,
1308 		 * be sure to get an mbuf that does not have a cluster
1309 		 * associated with it.
1310 		 */
1311 		if (copyhdr)
1312 			n = m_gethdr(wait, m->m_type);
1313 		else
1314 			n = m_get(wait, m->m_type);
1315 		*np = n;
1316 		if (n == NULL)
1317 			goto nospace;
1318 		if (copyhdr) {
1319 			if (!m_dup_pkthdr(n, m, wait))
1320 				goto nospace;
1321 			if (len == M_COPYALL)
1322 				n->m_pkthdr.len -= off0;
1323 			else
1324 				n->m_pkthdr.len = len;
1325 			copyhdr = 0;
1326 		}
1327 		n->m_len = min(len, m->m_len - off);
1328 		if (m->m_flags & M_EXT) {
1329 			KKASSERT((n->m_flags & M_EXT) == 0);
1330 			n->m_data = m->m_data + off;
1331 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1332 			n->m_ext = m->m_ext;
1333 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1334 		} else {
1335 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1336 			    (unsigned)n->m_len);
1337 		}
1338 		if (len != M_COPYALL)
1339 			len -= n->m_len;
1340 		off = 0;
1341 		m = m->m_next;
1342 		np = &n->m_next;
1343 	}
1344 	if (top == NULL)
1345 		++mbstat[mycpu->gd_cpuid].m_mcfail;
1346 	return (top);
1347 nospace:
1348 	m_freem(top);
1349 	++mbstat[mycpu->gd_cpuid].m_mcfail;
1350 	return (NULL);
1351 }
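
/*
 * Illustrative usage, not part of the original source: take a cheap
 * read-only copy of a whole packet, e.g. for a tap-style consumer:
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *
 * On failure NULL is returned and the original chain is untouched.
 * Because clusters are shared rather than copied, the result must not
 * be written to; use m_dup() when a writable copy is needed.
 */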
1352 
1353 /*
1354  * Copy an entire packet, including header (which must be present).
1355  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1356  * Note that the copy is read-only, because clusters are not copied,
1357  * only their reference counts are incremented.
1358  * Preserve alignment of the first mbuf so if the creator has left
1359  * some room at the beginning (e.g. for inserting protocol headers)
1360  * the copies also have the room available.
1361  */
1362 struct mbuf *
1363 m_copypacket(struct mbuf *m, int how)
1364 {
1365 	struct mbuf *top, *n, *o;
1366 
1367 	n = m_gethdr(how, m->m_type);
1368 	top = n;
1369 	if (!n)
1370 		goto nospace;
1371 
1372 	if (!m_dup_pkthdr(n, m, how))
1373 		goto nospace;
1374 	n->m_len = m->m_len;
1375 	if (m->m_flags & M_EXT) {
1376 		KKASSERT((n->m_flags & M_EXT) == 0);
1377 		n->m_data = m->m_data;
1378 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1379 		n->m_ext = m->m_ext;
1380 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1381 	} else {
1382 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
1383 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1384 	}
1385 
1386 	m = m->m_next;
1387 	while (m) {
1388 		o = m_get(how, m->m_type);
1389 		if (!o)
1390 			goto nospace;
1391 
1392 		n->m_next = o;
1393 		n = n->m_next;
1394 
1395 		n->m_len = m->m_len;
1396 		if (m->m_flags & M_EXT) {
1397 			KKASSERT((n->m_flags & M_EXT) == 0);
1398 			n->m_data = m->m_data;
1399 			m->m_ext.ext_ref(m->m_ext.ext_arg);
1400 			n->m_ext = m->m_ext;
1401 			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1402 		} else {
1403 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1404 		}
1405 
1406 		m = m->m_next;
1407 	}
1408 	return top;
1409 nospace:
1410 	m_freem(top);
1411 	++mbstat[mycpu->gd_cpuid].m_mcfail;
1412 	return (NULL);
1413 }
1414 
1415 /*
1416  * Copy data from an mbuf chain starting "off" bytes from the beginning,
1417  * continuing for "len" bytes, into the indicated buffer.
1418  */
1419 void
1420 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1421 {
1422 	unsigned count;
1423 
1424 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1425 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1426 	while (off > 0) {
1427 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1428 		if (off < m->m_len)
1429 			break;
1430 		off -= m->m_len;
1431 		m = m->m_next;
1432 	}
1433 	while (len > 0) {
1434 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1435 		count = min(m->m_len - off, len);
1436 		bcopy(mtod(m, caddr_t) + off, cp, count);
1437 		len -= count;
1438 		cp += count;
1439 		off = 0;
1440 		m = m->m_next;
1441 	}
1442 }
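
/*
 * Illustrative usage, assuming a protocol that needs a contiguous
 * header (struct ip is from <netinet/ip.h>, not this file):
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 *
 * The caller must ensure the chain really holds that many bytes at
 * the given offset; the KASSERTs above are only active in INVARIANTS
 * kernels.
 */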
1443 
1444 /*
1445  * Copy a packet header mbuf chain into a completely new chain, including
1446  * copying any mbuf clusters.  Use this instead of m_copypacket() when
1447  * you need a writable copy of an mbuf chain.
1448  */
1449 struct mbuf *
1450 m_dup(struct mbuf *m, int how)
1451 {
1452 	struct mbuf **p, *top = NULL;
1453 	int remain, moff, nsize;
1454 
1455 	/* Sanity check */
1456 	if (m == NULL)
1457 		return (NULL);
1458 	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1459 
1460 	/* While there's more data, get a new mbuf, tack it on, and fill it */
1461 	remain = m->m_pkthdr.len;
1462 	moff = 0;
1463 	p = &top;
1464 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
1465 		struct mbuf *n;
1466 
1467 		/* Get the next new mbuf */
1468 		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1469 			   &nsize);
1470 		if (n == NULL)
1471 			goto nospace;
1472 		if (top == NULL)
1473 			if (!m_dup_pkthdr(n, m, how))
1474 				goto nospace0;
1475 
1476 		/* Link it into the new chain */
1477 		*p = n;
1478 		p = &n->m_next;
1479 
1480 		/* Copy data from original mbuf(s) into new mbuf */
1481 		n->m_len = 0;
1482 		while (n->m_len < nsize && m != NULL) {
1483 			int chunk = min(nsize - n->m_len, m->m_len - moff);
1484 
1485 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1486 			moff += chunk;
1487 			n->m_len += chunk;
1488 			remain -= chunk;
1489 			if (moff == m->m_len) {
1490 				m = m->m_next;
1491 				moff = 0;
1492 			}
1493 		}
1494 
1495 		/* Check correct total mbuf length */
1496 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1497 			("%s: bogus m_pkthdr.len", __func__));
1498 	}
1499 	return (top);
1500 
1501 nospace:
1502 	m_freem(top);
1503 nospace0:
1504 	++mbstat[mycpu->gd_cpuid].m_mcfail;
1505 	return (NULL);
1506 }
1507 
1508 /*
1509  * Copy the non-packet mbuf data chain into a new set of mbufs, including
1510  * copying any mbuf clusters.  This is typically used to realign a data
1511  * chain by nfs_realign().
1512  *
1513  * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
1514  * and NULL can be returned if MB_DONTWAIT is passed.
1515  *
1516  * Be careful to use cluster mbufs; a large mbuf chain converted to
1517  * non-cluster mbufs can exhaust our supply of mbufs.
1518  */
1519 struct mbuf *
1520 m_dup_data(struct mbuf *m, int how)
1521 {
1522 	struct mbuf **p, *n, *top = NULL;
1523 	int mlen, moff, chunk, gsize, nsize;
1524 
1525 	/*
1526 	 * Degenerate case
1527 	 */
1528 	if (m == NULL)
1529 		return (NULL);
1530 
1531 	/*
1532 	 * Optimize the mbuf allocation but do not get too carried away.
1533 	 */
1534 	if (m->m_next || m->m_len > MLEN) {
1535 		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
1536 			gsize = MCLBYTES;
1537 		else
1538 			gsize = MJUMPAGESIZE;
1539 	} else
1540 		gsize = MLEN;
1541 
1542 	/* Chain control */
1543 	p = &top;
1544 	n = NULL;
1545 	nsize = 0;
1546 
1547 	/*
1548 	 * Scan the mbuf chain until nothing is left, the new mbuf chain
1549 	 * will be allocated on the fly as needed.
1550 	 */
1551 	while (m) {
1552 		mlen = m->m_len;
1553 		moff = 0;
1554 
1555 		while (mlen) {
1556 			KKASSERT(m->m_type == MT_DATA);
1557 			if (n == NULL) {
1558 				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
1559 				if (n == NULL)
1560 					goto nospace;
1561 				n->m_len = 0;
1562 				*p = n;
1563 				p = &n->m_next;
1564 			}
1565 			chunk = imin(mlen, nsize);
1566 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1567 			mlen -= chunk;
1568 			moff += chunk;
1569 			n->m_len += chunk;
1570 			nsize -= chunk;
1571 			if (nsize == 0)
1572 				n = NULL;
1573 		}
1574 		m = m->m_next;
1575 	}
1576 	*p = NULL;
1577 	return(top);
1578 nospace:
1579 	*p = NULL;
1580 	m_freem(top);
1581 	++mbstat[mycpu->gd_cpuid].m_mcfail;
1582 	return (NULL);
1583 }
1584 
1585 /*
1586  * Concatenate mbuf chain n to m.
1587  * Both chains must be of the same type (e.g. MT_DATA).
1588  * Any m_pkthdr is not updated.
1589  */
1590 void
1591 m_cat(struct mbuf *m, struct mbuf *n)
1592 {
1593 	m = m_last(m);
1594 	while (n) {
1595 		if (m->m_flags & M_EXT ||
1596 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1597 			/* just join the two chains */
1598 			m->m_next = n;
1599 			return;
1600 		}
1601 		/* splat the data from one into the other */
1602 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1603 		    (u_int)n->m_len);
1604 		m->m_len += n->m_len;
1605 		n = m_free(n);
1606 	}
1607 }
1608 
1609 void
1610 m_adj(struct mbuf *mp, int req_len)
1611 {
1612 	int len = req_len;
1613 	struct mbuf *m;
1614 	int count;
1615 
1616 	if ((m = mp) == NULL)
1617 		return;
1618 	if (len >= 0) {
1619 		/*
1620 		 * Trim from head.
1621 		 */
1622 		while (m != NULL && len > 0) {
1623 			if (m->m_len <= len) {
1624 				len -= m->m_len;
1625 				m->m_len = 0;
1626 				m = m->m_next;
1627 			} else {
1628 				m->m_len -= len;
1629 				m->m_data += len;
1630 				len = 0;
1631 			}
1632 		}
1633 		m = mp;
1634 		if (mp->m_flags & M_PKTHDR)
1635 			m->m_pkthdr.len -= (req_len - len);
1636 	} else {
1637 		/*
1638 		 * Trim from tail.  Scan the mbuf chain,
1639 		 * calculating its length and finding the last mbuf.
1640 		 * If the adjustment only affects this mbuf, then just
1641 		 * adjust and return.  Otherwise, rescan and truncate
1642 		 * after the remaining size.
1643 		 */
1644 		len = -len;
1645 		count = 0;
1646 		for (;;) {
1647 			count += m->m_len;
1648 			if (m->m_next == NULL)
1649 				break;
1650 			m = m->m_next;
1651 		}
1652 		if (m->m_len >= len) {
1653 			m->m_len -= len;
1654 			if (mp->m_flags & M_PKTHDR)
1655 				mp->m_pkthdr.len -= len;
1656 			return;
1657 		}
1658 		count -= len;
1659 		if (count < 0)
1660 			count = 0;
1661 		/*
1662 		 * Correct length for chain is "count".
1663 		 * Find the mbuf with last data, adjust its length,
1664 		 * and toss data from remaining mbufs on chain.
1665 		 */
1666 		m = mp;
1667 		if (m->m_flags & M_PKTHDR)
1668 			m->m_pkthdr.len = count;
1669 		for (; m; m = m->m_next) {
1670 			if (m->m_len >= count) {
1671 				m->m_len = count;
1672 				break;
1673 			}
1674 			count -= m->m_len;
1675 		}
1676 		while (m->m_next)
1677 			(m = m->m_next)->m_len = 0;
1678 	}
1679 }
1680 
1681 /*
1682  * Set the m_data pointer of a newly-allocated mbuf
1683  * to place an object of the specified size at the
1684  * end of the mbuf, longword aligned.
1685  */
1686 void
1687 m_align(struct mbuf *m, int len)
1688 {
1689 	int adjust;
1690 
1691 	if (m->m_flags & M_EXT)
1692 		adjust = m->m_ext.ext_size - len;
1693 	else if (m->m_flags & M_PKTHDR)
1694 		adjust = MHLEN - len;
1695 	else
1696 		adjust = MLEN - len;
1697 	m->m_data += adjust &~ (sizeof(long)-1);
1698 }
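
/*
 * Illustrative usage, not part of the original source: reserve room at
 * the end of a fresh mbuf (struct mytrailer is hypothetical):
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_align(m, sizeof(struct mytrailer));
 *		m->m_len = sizeof(struct mytrailer);
 *	}
 *
 * m_align() only adjusts m_data, so it must be called before any data
 * is stored in the mbuf.
 */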
1699 
1700 /*
1701  * Create a writable copy of the mbuf chain.  While doing this
1702  * we compact the chain with a goal of producing a chain with
1703  * at most two mbufs.  The second mbuf in this chain is likely
1704  * to be a cluster.  The primary purpose of this work is to create
1705  * a writable packet for encryption, compression, etc.  The
1706  * secondary goal is to linearize the data so the data can be
1707  * passed to crypto hardware in the most efficient manner possible.
1708  */
1709 struct mbuf *
1710 m_unshare(struct mbuf *m0, int how)
1711 {
1712 	struct mbuf *m, *mprev;
1713 	struct mbuf *n, *mfirst, *mlast;
1714 	int len, off;
1715 
1716 	mprev = NULL;
1717 	for (m = m0; m != NULL; m = mprev->m_next) {
1718 		/*
1719 		 * Regular mbufs are ignored unless there's a cluster
1720 		 * in front of them that we can use to coalesce.  We do
1721 		 * the latter mainly so later clusters can be coalesced
1722 		 * also w/o having to handle them specially (i.e. convert
1723 		 * mbuf+cluster -> cluster).  This optimization is heavily
1724 		 * influenced by the assumption that we're running over
1725 		 * Ethernet where MCLBYTES is large enough that the max
1726 		 * packet size will permit lots of coalescing into a
1727 		 * single cluster.  This in turn permits efficient
1728 		 * crypto operations, especially when using hardware.
1729 		 */
1730 		if ((m->m_flags & M_EXT) == 0) {
1731 			if (mprev && (mprev->m_flags & M_EXT) &&
1732 			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1733 				/* XXX: this ignores mbuf types */
1734 				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1735 				       mtod(m, caddr_t), m->m_len);
1736 				mprev->m_len += m->m_len;
1737 				mprev->m_next = m->m_next;	/* unlink from chain */
1738 				m_free(m);			/* reclaim mbuf */
1739 			} else {
1740 				mprev = m;
1741 			}
1742 			continue;
1743 		}
1744 		/*
1745 		 * Writable mbufs are left alone (for now).
1746 		 */
1747 		if (M_WRITABLE(m)) {
1748 			mprev = m;
1749 			continue;
1750 		}
1751 
1752 		/*
1753 		 * Not writable, replace with a copy or coalesce with
1754 		 * the previous mbuf if possible (since we have to copy
1755 		 * it anyway, we try to reduce the number of mbufs and
1756 		 * clusters so that future work is easier).
1757 		 */
1758 		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1759 		/* NB: we only coalesce into a cluster or larger */
1760 		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1761 		    m->m_len <= M_TRAILINGSPACE(mprev)) {
1762 			/* XXX: this ignores mbuf types */
1763 			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1764 			       mtod(m, caddr_t), m->m_len);
1765 			mprev->m_len += m->m_len;
1766 			mprev->m_next = m->m_next;	/* unlink from chain */
1767 			m_free(m);			/* reclaim mbuf */
1768 			continue;
1769 		}
1770 
1771 		/*
1772 		 * Allocate new space to hold the copy...
1773 		 */
1774 		/* XXX why can M_PKTHDR be set past the first mbuf? */
1775 		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
1776 			/*
1777 			 * NB: if a packet header is present we must
1778 			 * allocate the mbuf separately from any cluster
1779 			 * because M_MOVE_PKTHDR will smash the data
1780 			 * pointer and drop the M_EXT marker.
1781 			 */
1782 			MGETHDR(n, how, m->m_type);
1783 			if (n == NULL) {
1784 				m_freem(m0);
1785 				return (NULL);
1786 			}
1787 			M_MOVE_PKTHDR(n, m);
1788 			MCLGET(n, how);
1789 			if ((n->m_flags & M_EXT) == 0) {
1790 				m_free(n);
1791 				m_freem(m0);
1792 				return (NULL);
1793 			}
1794 		} else {
1795 			n = m_getcl(how, m->m_type, m->m_flags);
1796 			if (n == NULL) {
1797 				m_freem(m0);
1798 				return (NULL);
1799 			}
1800 		}
1801 		/*
1802 		 * ... and copy the data.  We deal with jumbo mbufs
1803 		 * (i.e. m_len > MCLBYTES) by splitting them into
1804 		 * clusters.  We could just malloc a buffer and make
1805 		 * it external but too many device drivers don't know
1806 		 * how to break up the non-contiguous memory when
1807 		 * doing DMA.
1808 		 */
1809 		len = m->m_len;
1810 		off = 0;
1811 		mfirst = n;
1812 		mlast = NULL;
1813 		for (;;) {
1814 			int cc = min(len, MCLBYTES);
1815 			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1816 			n->m_len = cc;
1817 			if (mlast != NULL)
1818 				mlast->m_next = n;
1819 			mlast = n;
1820 
1821 			len -= cc;
1822 			if (len <= 0)
1823 				break;
1824 			off += cc;
1825 
1826 			n = m_getcl(how, m->m_type, m->m_flags);
1827 			if (n == NULL) {
1828 				m_freem(mfirst);
1829 				m_freem(m0);
1830 				return (NULL);
1831 			}
1832 		}
1833 		n->m_next = m->m_next;
1834 		if (mprev == NULL)
1835 			m0 = mfirst;		/* new head of chain */
1836 		else
1837 			mprev->m_next = mfirst;	/* replace old mbuf */
1838 		m_free(m);			/* release old mbuf */
1839 		mprev = mfirst;
1840 	}
1841 	return (m0);
1842 }
1843 
1844 /*
1845  * Rearrange an mbuf chain so that len bytes are contiguous
1846  * and in the data area of an mbuf (so that mtod will work for a structure
1847  * of size len).  Returns the resulting mbuf chain on success, frees it and
1848  * returns null on failure.  If there is room, it will add up to
1849  * max_protohdr-len extra bytes to the contiguous region in an attempt to
1850  * avoid being called next time.
1851  */
1852 struct mbuf *
1853 m_pullup(struct mbuf *n, int len)
1854 {
1855 	struct mbuf *m;
1856 	int count;
1857 	int space;
1858 
1859 	/*
1860 	 * If first mbuf has no cluster, and has room for len bytes
1861 	 * without shifting current data, pullup into it,
1862 	 * otherwise allocate a new mbuf to prepend to the chain.
1863 	 */
1864 	if (!(n->m_flags & M_EXT) &&
1865 	    n->m_data + len < &n->m_dat[MLEN] &&
1866 	    n->m_next) {
1867 		if (n->m_len >= len)
1868 			return (n);
1869 		m = n;
1870 		n = n->m_next;
1871 		len -= m->m_len;
1872 	} else {
1873 		if (len > MHLEN)
1874 			goto bad;
1875 		if (n->m_flags & M_PKTHDR)
1876 			m = m_gethdr(MB_DONTWAIT, n->m_type);
1877 		else
1878 			m = m_get(MB_DONTWAIT, n->m_type);
1879 		if (m == NULL)
1880 			goto bad;
1881 		m->m_len = 0;
1882 		if (n->m_flags & M_PKTHDR)
1883 			M_MOVE_PKTHDR(m, n);
1884 	}
1885 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1886 	do {
1887 		count = min(min(max(len, max_protohdr), space), n->m_len);
1888 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1889 		  (unsigned)count);
1890 		len -= count;
1891 		m->m_len += count;
1892 		n->m_len -= count;
1893 		space -= count;
1894 		if (n->m_len)
1895 			n->m_data += count;
1896 		else
1897 			n = m_free(n);
1898 	} while (len > 0 && n);
1899 	if (len > 0) {
1900 		m_free(m);
1901 		goto bad;
1902 	}
1903 	m->m_next = n;
1904 	return (m);
1905 bad:
1906 	m_freem(n);
1907 	++mbstat[mycpu->gd_cpuid].m_mcfail;
1908 	return (NULL);
1909 }
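
/*
 * Illustrative usage, assuming the classic network-stack pattern
 * (struct ip is from <netinet/ip.h>, not this file):
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;
 *	}
 *	ip = mtod(m, struct ip *);
 *
 * On failure m_pullup() has already freed the chain, so the caller
 * must not reference the old pointer.
 */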
1910 
1911 /*
1912  * Partition an mbuf chain in two pieces, returning the tail --
1913  * all but the first len0 bytes.  In case of failure, it returns NULL and
1914  * attempts to restore the chain to its original state.
1915  *
1916  * Note that the resulting mbufs might be read-only, because the new
1917  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1918  * the "breaking point" happens to lie within a cluster mbuf. Use the
1919  * M_WRITABLE() macro to check for this case.
1920  */
1921 struct mbuf *
1922 m_split(struct mbuf *m0, int len0, int wait)
1923 {
1924 	struct mbuf *m, *n;
1925 	unsigned len = len0, remain;
1926 
1927 	for (m = m0; m && len > m->m_len; m = m->m_next)
1928 		len -= m->m_len;
1929 	if (m == NULL)
1930 		return (NULL);
1931 	remain = m->m_len - len;
1932 	if (m0->m_flags & M_PKTHDR) {
1933 		n = m_gethdr(wait, m0->m_type);
1934 		if (n == NULL)
1935 			return (NULL);
1936 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1937 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1938 		m0->m_pkthdr.len = len0;
1939 		if (m->m_flags & M_EXT)
1940 			goto extpacket;
1941 		if (remain > MHLEN) {
1942 			/* m can't be the lead packet */
1943 			MH_ALIGN(n, 0);
1944 			n->m_next = m_split(m, len, wait);
1945 			if (n->m_next == NULL) {
1946 				m_free(n);
1947 				return (NULL);
1948 			} else {
1949 				n->m_len = 0;
1950 				return (n);
1951 			}
1952 		} else
1953 			MH_ALIGN(n, remain);
1954 	} else if (remain == 0) {
1955 		n = m->m_next;
1956 		m->m_next = NULL;
1957 		return (n);
1958 	} else {
1959 		n = m_get(wait, m->m_type);
1960 		if (n == NULL)
1961 			return (NULL);
1962 		M_ALIGN(n, remain);
1963 	}
1964 extpacket:
1965 	if (m->m_flags & M_EXT) {
1966 		KKASSERT((n->m_flags & M_EXT) == 0);
1967 		n->m_data = m->m_data + len;
1968 		m->m_ext.ext_ref(m->m_ext.ext_arg);
1969 		n->m_ext = m->m_ext;
1970 		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1971 	} else {
1972 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1973 	}
1974 	n->m_len = remain;
1975 	m->m_len = len;
1976 	n->m_next = m->m_next;
1977 	m->m_next = NULL;
1978 	return (n);
1979 }
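
/*
 * Illustrative sketch (hypothetical wrapper): splitting a packet after a
 * fixed-size header.  Per the comment above m_split(), the tail can end
 * up sharing a cluster with the head, so a writer must check M_WRITABLE()
 * before modifying it in place.
 */
#if 0
static struct mbuf *
example_split_payload(struct mbuf *m, int hdrlen)
{
	struct mbuf *tail;

	tail = m_split(m, hdrlen, MB_DONTWAIT);
	if (tail == NULL)
		return (NULL);		/* m restored to original state */
	if (!M_WRITABLE(tail)) {
		/* tail may alias m's cluster; copy before writing to it */
	}
	return (tail);
}
#endif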
1980 
1981 /*
1982  * Routine to copy from device local memory into mbufs.
1983  * Note: "offset" is ill-defined and always called as 0, so ignore it.
1984  */
1985 struct mbuf *
1986 m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1987     void (*copy)(volatile const void *from, volatile void *to, size_t length))
1988 {
1989 	struct mbuf *m, *mfirst = NULL, **mtail;
1990 	int nsize, flags;
1991 
1992 	if (copy == NULL)
1993 		copy = bcopy;
1994 	mtail = &mfirst;
1995 	flags = M_PKTHDR;
1996 
1997 	while (len > 0) {
1998 		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1999 		if (m == NULL) {
2000 			m_freem(mfirst);
2001 			return (NULL);
2002 		}
2003 		m->m_len = min(len, nsize);
2004 
2005 		if (flags & M_PKTHDR) {
2006 			if (len + max_linkhdr <= nsize)
2007 				m->m_data += max_linkhdr;
2008 			m->m_pkthdr.rcvif = ifp;
2009 			m->m_pkthdr.len = len;
2010 			flags = 0;
2011 		}
2012 
2013 		copy(buf, m->m_data, (unsigned)m->m_len);
2014 		buf += m->m_len;
2015 		len -= m->m_len;
2016 		*mtail = m;
2017 		mtail = &m->m_next;
2018 	}
2019 
2020 	return (mfirst);
2021 }
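
/*
 * Illustrative sketch (hypothetical driver names): a receive path for a
 * device whose packet sits in one contiguous local buffer.  Passing a
 * NULL copy function selects plain bcopy, as m_devget() does above.
 */
#if 0
static void
example_rxeof(struct ifnet *ifp, char *pktbuf, int pktlen)
{
	struct mbuf *m;

	m = m_devget(pktbuf, pktlen, 0, ifp, NULL);
	if (m == NULL)
		return;		/* out of mbufs: drop the frame */
	/* hand the chain to the stack via the driver's input path */
}
#endif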
2022 
2023 /*
2024  * Routine to pad mbuf to the specified length 'padto'.
2025  */
2026 int
2027 m_devpad(struct mbuf *m, int padto)
2028 {
2029 	struct mbuf *last = NULL;
2030 	int padlen;
2031 
2032 	if (padto <= m->m_pkthdr.len)
2033 		return 0;
2034 
2035 	padlen = padto - m->m_pkthdr.len;
2036 
2037 	/* If there's only the packet header and we can pad there, use it. */
2038 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
2039 		last = m;
2040 	} else {
2041 		/*
2042 		 * Walk packet chain to find last mbuf. We will either
2043 		 * pad there, or append a new mbuf and pad it.
2044 		 */
2045 		for (last = m; last->m_next != NULL; last = last->m_next)
2046 			; /* EMPTY */
2047 
2048 		/* `last' now points to last in chain. */
2049 		if (M_TRAILINGSPACE(last) < padlen) {
2050 			struct mbuf *n;
2051 
2052 			/* Allocate new empty mbuf, pad it.  Compact later. */
2053 			MGET(n, MB_DONTWAIT, MT_DATA);
2054 			if (n == NULL)
2055 				return ENOBUFS;
2056 			n->m_len = 0;
2057 			last->m_next = n;
2058 			last = n;
2059 		}
2060 	}
2061 	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
2062 	KKASSERT(M_WRITABLE(last));
2063 
2064 	/* Now zero the pad area */
2065 	bzero(mtod(last, char *) + last->m_len, padlen);
2066 	last->m_len += padlen;
2067 	m->m_pkthdr.len += padlen;
2068 	return 0;
2069 }
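
/*
 * Illustrative sketch (hypothetical helper, constants from
 * <net/ethernet.h>): padding a runt frame on the transmit path.
 * m_devpad() is a no-op when the packet is already long enough.
 */
#if 0
static int
example_pad_runt(struct mbuf *m)
{
	return (m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN));
}
#endif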
2070 
2071 /*
2072  * Copy data from a buffer back into the indicated mbuf chain,
2073  * starting "off" bytes from the beginning, extending the mbuf
2074  * chain if necessary.
2075  */
2076 void
2077 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
2078 {
2079 	int mlen;
2080 	struct mbuf *m = m0, *n;
2081 	int totlen = 0;
2082 
2083 	if (m0 == NULL)
2084 		return;
2085 	while (off > (mlen = m->m_len)) {
2086 		off -= mlen;
2087 		totlen += mlen;
2088 		if (m->m_next == NULL) {
2089 			n = m_getclr(MB_DONTWAIT, m->m_type);
2090 			if (n == NULL)
2091 				goto out;
2092 			n->m_len = min(MLEN, len + off);
2093 			m->m_next = n;
2094 		}
2095 		m = m->m_next;
2096 	}
2097 	while (len > 0) {
2098 		mlen = min(m->m_len - off, len);
2099 		bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen);
2100 		cp += mlen;
2101 		len -= mlen;
2102 		mlen += off;
2103 		off = 0;
2104 		totlen += mlen;
2105 		if (len == 0)
2106 			break;
2107 		if (m->m_next == NULL) {
2108 			n = m_get(MB_DONTWAIT, m->m_type);
2109 			if (n == NULL)
2110 				break;
2111 			n->m_len = min(MLEN, len);
2112 			m->m_next = n;
2113 		}
2114 		m = m->m_next;
2115 	}
2116 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
2117 		m->m_pkthdr.len = totlen;
2118 }
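
/*
 * Illustrative sketch (hypothetical offsets): overwriting a 16-bit
 * checksum field in place with m_copyback().  Note that extension uses
 * MB_DONTWAIT allocations and stops short silently on failure, so this
 * pattern is safest when the target bytes already exist in the chain.
 */
#if 0
static void
example_store_cksum(struct mbuf *m, int cksum_off, uint16_t cksum)
{
	m_copyback(m, cksum_off, sizeof(cksum), (caddr_t)&cksum);
}
#endif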
2119 
2120 /*
2121  * Append the specified data to the indicated mbuf chain.
2122  * Extend the mbuf chain if the new data does not fit in
2123  * existing space.
2124  *
2125  * Return 1 if able to complete the job; otherwise 0.
2126  */
2127 int
2128 m_append(struct mbuf *m0, int len, c_caddr_t cp)
2129 {
2130 	struct mbuf *m, *n;
2131 	int remainder, space;
2132 
2133 	for (m = m0; m->m_next != NULL; m = m->m_next)
2134 		;
2135 	remainder = len;
2136 	space = M_TRAILINGSPACE(m);
2137 	if (space > 0) {
2138 		/*
2139 		 * Copy into available space.
2140 		 */
2141 		if (space > remainder)
2142 			space = remainder;
2143 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
2144 		m->m_len += space;
2145 		cp += space, remainder -= space;
2146 	}
2147 	while (remainder > 0) {
2148 		/*
2149 		 * Allocate a new mbuf; could check space
2150 		 * and allocate a cluster instead.
2151 		 */
2152 		n = m_get(MB_DONTWAIT, m->m_type);
2153 		if (n == NULL)
2154 			break;
2155 		n->m_len = min(MLEN, remainder);
2156 		bcopy(cp, mtod(n, caddr_t), n->m_len);
2157 		cp += n->m_len, remainder -= n->m_len;
2158 		m->m_next = n;
2159 		m = n;
2160 	}
2161 	if (m0->m_flags & M_PKTHDR)
2162 		m0->m_pkthdr.len += len - remainder;
2163 	return (remainder == 0);
2164 }
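
/*
 * Illustrative sketch (hypothetical wrapper): appending a small trailer.
 * m_append() returns 1 only when every byte was placed, so a zero return
 * means a partial append; the chain itself remains valid either way.
 */
#if 0
static int
example_add_trailer(struct mbuf *m0, const char *trailer, int tlen)
{
	if (!m_append(m0, tlen, trailer))
		return (ENOBUFS);	/* partial append; m0 still usable */
	return (0);
}
#endif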
2165 
2166 /*
2167  * Apply function f to the data in an mbuf chain starting "off" bytes from
2168  * the beginning, continuing for "len" bytes.
2169  */
2170 int
2171 m_apply(struct mbuf *m, int off, int len,
2172     int (*f)(void *, void *, u_int), void *arg)
2173 {
2174 	u_int count;
2175 	int rval;
2176 
2177 	KASSERT(off >= 0, ("m_apply, negative off %d", off));
2178 	KASSERT(len >= 0, ("m_apply, negative len %d", len));
2179 	while (off > 0) {
2180 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2181 		if (off < m->m_len)
2182 			break;
2183 		off -= m->m_len;
2184 		m = m->m_next;
2185 	}
2186 	while (len > 0) {
2187 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2188 		count = min(m->m_len - off, len);
2189 		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
2190 		if (rval)
2191 			return (rval);
2192 		len -= count;
2193 		off = 0;
2194 		m = m->m_next;
2195 	}
2196 	return (0);
2197 }
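
/*
 * Illustrative sketch (hypothetical names): m_apply() with a callback
 * that accumulates a simple byte sum over part of a chain, visiting each
 * mbuf's data in place instead of linearizing the chain first.
 */
#if 0
static int
example_sum_cb(void *arg, void *data, u_int len)
{
	u_int *sum = arg;
	u_char *p = data;

	while (len-- > 0)
		*sum += *p++;
	return (0);		/* non-zero would abort the walk */
}

static u_int
example_sum(struct mbuf *m, int off, int len)
{
	u_int sum = 0;

	m_apply(m, off, len, example_sum_cb, &sum);
	return (sum);
}
#endif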
2198 
2199 /*
2200  * Return a pointer to mbuf/offset of location in mbuf chain.
2201  */
2202 struct mbuf *
2203 m_getptr(struct mbuf *m, int loc, int *off)
2204 {
2205 
2206 	while (loc >= 0) {
2207 		/* Normal end of search. */
2208 		if (m->m_len > loc) {
2209 			*off = loc;
2210 			return (m);
2211 		} else {
2212 			loc -= m->m_len;
2213 			if (m->m_next == NULL) {
2214 				if (loc == 0) {
2215 					/* Point at the end of valid data. */
2216 					*off = m->m_len;
2217 					return (m);
2218 				}
2219 				return (NULL);
2220 			}
2221 			m = m->m_next;
2222 		}
2223 	}
2224 	return (NULL);
2225 }
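
/*
 * Illustrative sketch (hypothetical helper): using m_getptr() to locate
 * a byte offset for an in-place patch.  m_getptr() may return a pointer
 * to the very end of valid data (*off == m_len), so that case is
 * rejected here; writability of the mbuf is assumed.
 */
#if 0
static int
example_patch_byte(struct mbuf *m0, int loc, u_char val)
{
	struct mbuf *m;
	int off;

	m = m_getptr(m0, loc, &off);
	if (m == NULL || off >= m->m_len)
		return (EINVAL);	/* offset at or past end of data */
	*(mtod(m, u_char *) + off) = val;
	return (0);
}
#endif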
2226 
2227 void
2228 m_print(const struct mbuf *m)
2229 {
2230 	int len;
2231 	const struct mbuf *m2;
2232 
2233 	len = m->m_pkthdr.len;
2234 	m2 = m;
2235 	while (len > 0 && m2 != NULL) {
2236 		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
2237 		len -= m2->m_len;
2238 		m2 = m2->m_next;
2239 	}
2241 }
2242 
2243 /*
2244  * "Move" mbuf pkthdr from "from" to "to".
2245  * "from" must have M_PKTHDR set, and "to" must be empty.
2246  */
2247 void
2248 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
2249 {
2250 	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
2251 
2252 	to->m_flags |= from->m_flags & M_COPYFLAGS;
2253 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
2254 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
2255 }
2256 
2257 /*
2258  * Duplicate "from"'s mbuf pkthdr in "to".
2259  * "from" must have M_PKTHDR set, and "to" must be empty.
2260  * In particular, this does a deep copy of the packet tags.
2261  */
2262 int
2263 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
2264 {
2265 	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
2266 
2267 	to->m_flags = (from->m_flags & M_COPYFLAGS) |
2268 		      (to->m_flags & ~M_COPYFLAGS);
2269 	to->m_pkthdr = from->m_pkthdr;
2270 	SLIST_INIT(&to->m_pkthdr.tags);
2271 	return (m_tag_copy_chain(to, from, how));
2272 }
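
/*
 * Illustrative sketch (hypothetical wrapper): making a fresh header copy
 * of a packet.  m_dup_pkthdr() is used instead of m_move_pkthdr() so the
 * original keeps its header and tags; it returns 0 when the deep tag
 * copy fails.
 */
#if 0
static struct mbuf *
example_new_header_copy(const struct mbuf *from, int how)
{
	struct mbuf *to;

	to = m_gethdr(how, from->m_type);
	if (to == NULL)
		return (NULL);
	if (m_dup_pkthdr(to, from, how) == 0) {	/* tag copy failed */
		m_free(to);
		return (NULL);
	}
	return (to);
}
#endif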
2273 
2274 /*
2275  * Defragment an mbuf chain, returning the shortest possible
2276  * chain of mbufs and clusters.  If allocation fails and
2277  * this cannot be completed, NULL will be returned, but
2278  * the passed in chain will be unchanged.  Upon success,
2279  * the original chain will be freed, and the new chain
2280  * will be returned.
2281  *
2282  * If a non-packet-header mbuf is passed in, the original
2283  * mbuf chain will be returned unharmed.
2284  *
2285  * m_defrag_nofree doesn't free the passed in mbuf.
2286  * m_defrag_nofree doesn't free the passed-in mbuf.
2287 struct mbuf *
2288 m_defrag(struct mbuf *m0, int how)
2289 {
2290 	struct mbuf *m_new;
2291 
2292 	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
2293 		return (NULL);
2294 	if (m_new != m0)
2295 		m_freem(m0);
2296 	return (m_new);
2297 }
2298 
2299 struct mbuf *
2300 m_defrag_nofree(struct mbuf *m0, int how)
2301 {
2302 	struct mbuf	*m_new = NULL, *m_final = NULL;
2303 	int		progress = 0, length, nsize;
2304 
2305 	if (!(m0->m_flags & M_PKTHDR))
2306 		return (m0);
2307 
2308 #ifdef MBUF_STRESS_TEST
2309 	if (m_defragrandomfailures) {
2310 		int temp = karc4random() & 0xff;
2311 		if (temp == 0xba)
2312 			goto nospace;
2313 	}
2314 #endif
2315 
2316 	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
2317 	if (m_final == NULL)
2318 		goto nospace;
2319 	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */
2320 
2321 	if (m_dup_pkthdr(m_final, m0, how) == 0)
2322 		goto nospace;
2323 
2324 	m_new = m_final;
2325 
2326 	while (progress < m0->m_pkthdr.len) {
2327 		length = m0->m_pkthdr.len - progress;
2328 		if (length > MCLBYTES)
2329 			length = MCLBYTES;
2330 
2331 		if (m_new == NULL) {
2332 			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
2333 			if (m_new == NULL)
2334 				goto nospace;
2335 		}
2336 
2337 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2338 		progress += length;
2339 		m_new->m_len = length;
2340 		if (m_new != m_final)
2341 			m_cat(m_final, m_new);
2342 		m_new = NULL;
2343 	}
2344 	if (m0->m_next == NULL)
2345 		m_defraguseless++;
2346 	m_defragpackets++;
2347 	m_defragbytes += m_final->m_pkthdr.len;
2348 	return (m_final);
2349 nospace:
2350 	m_defragfailure++;
2351 	if (m_new)
2352 		m_free(m_new);
2353 	m_freem(m_final);
2354 	return (NULL);
2355 }
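
/*
 * Illustrative sketch (EXAMPLE_MAXSEGS and the wrapper are hypothetical):
 * a transmit path that falls back to m_defrag() when a chain has too many
 * segments for a device's DMA engine.  On success m_defrag() has already
 * freed the original chain; on failure the original is left intact.
 */
#if 0
static struct mbuf *
example_tx_defrag(struct mbuf *m, int nsegs)
{
	struct mbuf *m_new;

	if (nsegs <= EXAMPLE_MAXSEGS)
		return (m);
	m_new = m_defrag(m, MB_DONTWAIT);
	if (m_new == NULL) {
		m_freem(m);	/* give up; original still ours to free */
		return (NULL);
	}
	return (m_new);
}
#endif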
2356 
2357 /*
2358  * Move data from uio into mbufs.
2359  */
2360 struct mbuf *
2361 m_uiomove(struct uio *uio)
2362 {
2363 	struct mbuf *m;			/* current working mbuf */
2364 	struct mbuf *head = NULL;	/* result mbuf chain */
2365 	struct mbuf **mp = &head;
2366 	int flags = M_PKTHDR;
2367 	int nsize;
2368 	int error;
2369 	int resid;
2370 
2371 	do {
2372 		if (uio->uio_resid > INT_MAX)
2373 			resid = INT_MAX;
2374 		else
2375 			resid = (int)uio->uio_resid;
2376 		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
2377 		if (flags) {
2378 			m->m_pkthdr.len = 0;
2379 			/* Leave room for protocol headers. */
2380 			if (resid < MHLEN)
2381 				MH_ALIGN(m, resid);
2382 			flags = 0;
2383 		}
2384 		m->m_len = imin(nsize, resid);
2385 		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
2386 		if (error) {
2387 			m_free(m);
2388 			goto failed;
2389 		}
2390 		*mp = m;
2391 		mp = &m->m_next;
2392 		head->m_pkthdr.len += m->m_len;
2393 	} while (uio->uio_resid > 0);
2394 
2395 	return (head);
2396 
2397 failed:
2398 	m_freem(head);
2399 	return (NULL);
2400 }
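
/*
 * Illustrative sketch (hypothetical send helper): building a packet chain
 * straight from a user I/O request.  m_uiomove() waits for mbufs
 * (MB_WAIT), so a NULL return indicates a uiomove() failure; the exact
 * error is not propagated, and EFAULT is assumed here for illustration.
 */
#if 0
static int
example_send(struct uio *uio, struct mbuf **mp)
{
	struct mbuf *m;

	m = m_uiomove(uio);
	if (m == NULL)
		return (EFAULT);	/* copyin failed; chain was freed */
	*mp = m;
	return (0);
}
#endif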
2401 
2402 struct mbuf *
2403 m_last(struct mbuf *m)
2404 {
2405 	while (m->m_next)
2406 		m = m->m_next;
2407 	return (m);
2408 }
2409 
2410 /*
2411  * Return the number of bytes in an mbuf chain.
2412  * If lastm is not NULL, also return the last mbuf.
2413  */
2414 u_int
2415 m_lengthm(struct mbuf *m, struct mbuf **lastm)
2416 {
2417 	u_int len = 0;
2418 	struct mbuf *prev = m;
2419 
2420 	while (m) {
2421 		len += m->m_len;
2422 		prev = m;
2423 		m = m->m_next;
2424 	}
2425 	if (lastm != NULL)
2426 		*lastm = prev;
2427 	return (len);
2428 }
2429 
2430 /*
2431  * Like m_lengthm(), except also keep track of mbuf usage.
2432  */
2433 u_int
2434 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2435 {
2436 	u_int len = 0, mbcnt = 0;
2437 	struct mbuf *prev = m;
2438 
2439 	while (m) {
2440 		len += m->m_len;
2441 		mbcnt += MSIZE;
2442 		if (m->m_flags & M_EXT)
2443 			mbcnt += m->m_ext.ext_size;
2444 		prev = m;
2445 		m = m->m_next;
2446 	}
2447 	if (lastm != NULL)
2448 		*lastm = prev;
2449 	*pmbcnt = mbcnt;
2450 	return (len);
2451 }
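
/*
 * Illustrative sketch (hypothetical helper): one-pass accounting in the
 * style of socket-buffer bookkeeping.  m_countm() returns the payload
 * byte count and fills in the storage footprint (MSIZE per mbuf plus any
 * attached cluster sizes).
 */
#if 0
static void
example_account(struct mbuf *m)
{
	struct mbuf *last;
	u_int bytes, mbcnt;

	bytes = m_countm(m, &last, &mbcnt);
	/* bytes: data length; mbcnt: mbuf + cluster storage consumed */
}
#endif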
2452