/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice immediately at the beginning of the file, without modification,
 *	this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *	John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID 0
#define	ZONE_ERROR_NOTFREE 1
#define	ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

int zone_burst = 32;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;
	int n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			z->zfreecnt--;
			((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
			z->zitems_pcpu[gd->gd_cpuid] = item;
			++z->zfreecnt_pcpu[gd->gd_cpuid];
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_enter_gd(gd);
		((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
		if (((void **)item)[1] == (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_ALREADYFREE);
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems_pcpu[gd->gd_cpuid] = item;
		++z->zfreecnt_pcpu[gd->gd_cpuid];
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}
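
/*
 * Sizing note for the per-cpu cache used by zfree() above (illustrative
 * numbers only): a zone with z->zmax = 65536 items on an 8-cpu system
 * lets each cpu hold up to 65536 / 8 / 16 = 512 free items locally
 * before further frees spill into the zone-wide list under the spinlock.
 * Zones that compute a smaller value, and unlimited zones (zmax == 0),
 * are clamped up to a minimum per-cpu cache of 64 items.
 */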

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
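
/*
 * Illustrative layout (hypothetical structure, not part of this file):
 * a free item's first two longwords are overwritten with the free-list
 * link and, under INVARIANTS, the ZENTRY_FREE magic, so any field that
 * relies on the type-stable property must be placed after them:
 *
 *	struct foo {
 *		void	*foo_spare[2];	(clobbered while the item is free)
 *		int	foo_stable;	(survives zfree()/zalloc() cycles)
 *	};
 */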

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code, initialize
 * various parts of the zone structure.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space up front and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The amount of allocatable memory is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
			vm_object_drop(obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}
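
/*
 * Illustrative ZONE_INTERRUPT setup (hypothetical zone, not part of this
 * file): the caller supplies the zone structure and an entry limit, and
 * zinitna() reserves KVA for all 'nentries' up front so that zalloc()
 * can later populate pages without blocking:
 *
 *	static struct vm_zone barzone;
 *
 *	zinitna(&barzone, NULL, "BAR", sizeof(struct bar), nbars,
 *		ZONE_INTERRUPT, 1);
 */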

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by kmalloc().  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit() and zinitna() can be used.  zinit() is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
	            flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
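
/*
 * Illustrative use of a destroyable zone (hypothetical names, not part
 * of this file):
 *
 *	vm_zone_t foozone;
 *
 *	foozone = zinit("FOO", sizeof(struct foo), 0, ZONE_DESTROYABLE, 4);
 *	struct foo *fp = zalloc(foozone);
 *	...
 *	zfree(foozone, fp);
 *	...
 *	zdestroy(foozone);
 */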

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock);

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}
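
/*
 * Illustrative boot-time pattern (hypothetical names, not part of this
 * file): very early zones are carved out of static storage with
 * zbootinit() and later upgraded with zinitna() once the VM system is
 * running, which is why zinitna() skips re-initialization when
 * ZONE_BOOT is already set:
 *
 *	static struct vm_zone bootzone;
 *	static struct foo bootitems[NBOOTITEMS];
 *
 *	zbootinit(&bootzone, "FOO", sizeof(struct foo),
 *		  bootitems, NBOOTITEMS);
 *	...
 *	zinitna(&bootzone, NULL, "FOO", sizeof(struct foo), 0, 0, 1);
 */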

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int npages;
	int savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
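		/*
		 * Illustrative numbers: with PAGE_SIZE 4096, zsize 192 and
		 * one page already populated, noffset = 4096 - (4096 % 192)
		 * = 4032, so the first new item starts in the tail of the
		 * already-mapped page and straddles into the pages mapped
		 * below; the rounding keeps that tail from being wasted.
		 */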
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}
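
/*
 * The handler above produces output of roughly the following shape when
 * read via "sysctl vm.zone" (zone names and values are illustrative):
 *
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	MAP ENTRY:    000064, 00000000, 001244, 000356, 00018933
 *	PV ENTRY:     000032, 00262144, 014887, 002121, 00142742
 */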

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
	CTLFLAG_RW, &zone_burst, 0,
	"Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");