xref: /freebsd/sys/kern/kern_mbuf.c (revision 39beb93c)
/*-
 * Copyright (c) 2004, 2005,
 * 	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                              [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * whenever an object is freed through uma_zfree(), its _dtor_
 * function is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool, it is pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects are decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */
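
/*
 * Illustrative sketch (not part of this file): how the public mbuf
 * allocation API maps onto the zones above.  The wrappers live in
 * mbuf.h; exact flags and signatures may differ between revisions.
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);		    Mbuf Zone only
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);	    Mbuf Zone, with pkthdr
 *	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); Packet Zone: mbuf + cluster
 *	m_clget(m, M_DONTWAIT);			    attach a cluster from the
 *						    Cluster Zone to an mbuf
 *	m_freem(m);				    objects return to the caches
 */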

int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;

/*
 * tunable_mbinit() has to be run before init_maxsockets(); thus
 * the SYSINIT order below is SI_ORDER_MIDDLE, while init_maxsockets()
 * runs at SI_ORDER_ANY.
 */
static void
tunable_mbinit(void *dummy)
{
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);

	/* This has to be done before VM init. */
	if (nmbclusters == 0)
		nmbclusters = 1024 + maxusers * 64;
	nmbjumbop = nmbclusters / 2;
	nmbjumbo9 = nmbjumbop / 2;
	nmbjumbo16 = nmbjumbo9 / 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
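
/*
 * Administrator-facing example (hypothetical value): the cluster limit
 * can be set at boot via the loader tunable, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="65536"
 *
 * The jumbo cluster limits are then derived from it in tunable_mbinit()
 * above, and may be raised later through the sysctls below.
 */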

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");
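
/*
 * Runtime example (hypothetical value): the limit can only be raised,
 * never lowered, so
 *
 *	# sysctl kern.ipc.nmbclusters=131072
 *
 * succeeds if the new value exceeds the current one and returns EINVAL
 * otherwise.  The same pattern applies to the jumbo cluster sysctls
 * below.
 */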

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
static void	mbuf_jumbo_free(void *, int, u_int8_t);

static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
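
/*
 * A worked case of the assertion above (illustrative only): for
 * MSIZE == 256, (255 ^ 256) == 511, 511 + 1 == 512, and 512 >> 1 == 256,
 * so the check holds.  For a non-power-of-two value such as 384 the
 * expression evaluates to 128, not 384, and compilation fails.
 */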

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Also create the jumbo frame zones: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo9 > 0)
		uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo16 > 0)
		uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo16, mbuf_jumbo_free);

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return (contigmalloc(bytes, M_JUMBOFRAME, wait, (vm_paddr_t)0,
	    ~(vm_paddr_t)0, 1, 0));
}

/*
 * UMA backend page deallocator for the jumbo frame zones.
 */
static void
mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
{

	contigfree(mem, size, M_JUMBOFRAME);
}
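
/*
 * Illustrative note: these routines are only reached when the 9k or 16k
 * zones need fresh slabs from VM, e.g. when a caller asks for a jumbo
 * cluster with something like (a sketch, not a call made in this file):
 *
 *	m_cljget(m, M_DONTWAIT, MJUM9BYTES);
 *
 * and the per-CPU cache and zone are both empty.  contigmalloc() keeps
 * the buffer physically contiguous, which the 2K and page-sized zones
 * do not need.
 */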

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}
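
/*
 * Sketch of how a caller reaches this constructor (simplified from the
 * m_get()/m_gethdr() wrappers in mbuf.h; not a definitive copy):
 *
 *	struct mb_args args;
 *	struct mbuf *m;
 *
 *	args.flags = M_PKTHDR;
 *	args.type = MT_DATA;
 *	m = uma_zalloc_arg(zone_mbuf, &args, M_DONTWAIT);
 *
 * UMA passes &args through as 'arg' above, so the constructor sees the
 * caller's flags and mbuf type.
 */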

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
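
/*
 * Sketch of the two ways this constructor is typically reached
 * (simplified from the wrappers in mbuf.h; details may vary by revision):
 *
 *	m_clget(m, M_DONTWAIT);			arg == m, the 2K cluster
 *						is attached to the mbuf
 *	m_cljget(NULL, M_DONTWAIT, MJUM9BYTES);	arg == NULL, a bare 9k
 *						cluster is returned
 */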

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}
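
/*
 * Illustrative lifecycle of a Packet zone object (a summary of the
 * routines above and below, not additional code):
 *
 *	keg slab -> mb_zinit_pack()	cluster allocated and attached
 *	cache    -> mb_ctor_pack()	per-allocation mbuf fields set up
 *	free     -> mb_dtor_pack()	tags freed, cluster kept attached
 *	reclaim  -> mb_zfini_pack()	cluster detached, object returns
 *					to the keg slab
 */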

/*
 * The Packet secondary zone's constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines
 * presently have to acquire some locks, which raises the possibility
 * of a lock order reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}
683