xref: /dragonfly/sys/kern/kern_slaballoc.c (revision 1f8a7fec)
1 /*
2  * (MPSAFE)
3  *
4  * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
5  *
6  * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
7  *
8  * This code is derived from software contributed to The DragonFly Project
9  * by Matthew Dillon <dillon@backplane.com>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in
19  *    the documentation and/or other materials provided with the
20  *    distribution.
21  * 3. Neither the name of The DragonFly Project nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific, prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
39  *
40  * This module implements a slab allocator drop-in replacement for the
41  * kernel malloc().
42  *
43  * A slab allocator reserves a ZONE for each chunk size, then lays the
44  * chunks out in an array within the zone.  Allocation and deallocation
45  * are nearly instantaneous, and fragmentation/overhead losses are limited
46  * to a fixed worst-case amount.
47  *
48  * The downside of this slab implementation is the VM it reserves: the
49  * zone size multiplied by the number of zones.  ~80 zones * 128K = 10MB per cpu.
50  * In a kernel implementation all this memory will be physical so
51  * the zone size is adjusted downward on machines with less physical
52  * memory.  The upside is that overhead is bounded... this is the *worst*
53  * case overhead.
54  *
55  * Slab management is done on a per-cpu basis and no locking or mutexes
56  * are required, only a critical section.  When one cpu frees memory
57  * belonging to another cpu's slab manager an asynchronous IPI message
58  * will be queued to execute the operation.   In addition, both the
59  * high level slab allocator and the low level zone allocator optimize
60  * M_ZERO requests, and the slab allocator does not have to pre-initialize
61  * the linked list of chunks.
62  *
63  * XXX Balancing is needed between cpus.  Balance will be handled through
64  * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
65  *
66  * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
67  * the new zone should be restricted to M_USE_RESERVE requests only.
68  *
69  *	Alloc Size	Chunking        Number of zones
70  *	0-127		8		16
71  *	128-255		16		8
72  *	256-511		32		8
73  *	512-1023	64		8
74  *	1024-2047	128		8
75  *	2048-4095	256		8
76  *	4096-8191	512		8
77  *	8192-16383	1024		8
78  *	16384-32767	2048		8
79  *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
80  *
81  *	Allocations >= ZoneLimit go directly to kmem.
82  *
83  *			API REQUIREMENTS AND SIDE EFFECTS
84  *
85  *    To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
86  *    have remained compatible with the following API requirements:
87  *
88  *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
89  *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
90  *    + malloc(0) is allowed and returns non-NULL (ahc driver)
91  *    + ability to allocate arbitrarily large chunks of memory
92  */
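/*
 * Illustrative usage sketch (hypothetical caller; the pool name and sizes
 * below are made up for the example and are not part of this file):
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	void *p = kmalloc(100, M_EXAMPLE, M_WAITOK | M_ZERO);
 *		(100 bytes falls in the 0-127 row above, so the request is
 *		rounded up to a 104-byte chunk taken from a per-cpu zone)
 *	p = krealloc(p, 3000, M_EXAMPLE, M_WAITOK);
 *		(3000 bytes re-chunks to 3072 bytes: 2048-4095 row,
 *		256-byte chunking)
 *	kfree(p, M_EXAMPLE);
 */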
93 
94 #include "opt_vm.h"
95 
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/kernel.h>
99 #include <sys/slaballoc.h>
100 #include <sys/mbuf.h>
101 #include <sys/vmmeter.h>
102 #include <sys/lock.h>
103 #include <sys/thread.h>
104 #include <sys/globaldata.h>
105 #include <sys/sysctl.h>
106 #include <sys/ktr.h>
107 
108 #include <vm/vm.h>
109 #include <vm/vm_param.h>
110 #include <vm/vm_kern.h>
111 #include <vm/vm_extern.h>
112 #include <vm/vm_object.h>
113 #include <vm/pmap.h>
114 #include <vm/vm_map.h>
115 #include <vm/vm_page.h>
116 #include <vm/vm_pageout.h>
117 
118 #include <machine/cpu.h>
119 
120 #include <sys/thread2.h>
121 
122 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
123 
124 #define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
125 #define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + 	\
126 			sizeof(int))
127 
128 #if !defined(KTR_MEMORY)
129 #define KTR_MEMORY	KTR_ALL
130 #endif
131 KTR_INFO_MASTER(memory);
132 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
133 KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
134 KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
135 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
136 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
137 KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
138 #ifdef SMP
139 KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
140 KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARG_SIZE);
141 KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARG_SIZE);
142 #endif
143 KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin", 0);
144 KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end", 0);
145 
146 #define logmemory(name, ptr, type, size, flags)				\
147 	KTR_LOG(memory_ ## name, ptr, type, size, flags)
148 #define logmemory_quick(name)						\
149 	KTR_LOG(memory_ ## name)
150 
151 /*
152  * Fixed globals (not per-cpu)
153  */
154 static int ZoneSize;
155 static int ZoneLimit;
156 static int ZonePageCount;
157 static uintptr_t ZoneMask;
158 static int ZoneBigAlloc;		/* in KB */
159 static int ZoneGenAlloc;		/* in KB */
160 struct malloc_type *kmemstatistics;	/* exported to vmstat */
161 static struct kmemusage *kmemusage;
162 static int32_t weirdary[16];
163 
164 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
165 static void kmem_slab_free(void *ptr, vm_size_t bytes);
166 
167 #if defined(INVARIANTS)
168 static void chunk_mark_allocated(SLZone *z, void *chunk);
169 static void chunk_mark_free(SLZone *z, void *chunk);
170 #else
171 #define chunk_mark_allocated(z, chunk)
172 #define chunk_mark_free(z, chunk)
173 #endif
174 
175 /*
176  * Misc constants.  Note that allocations that are exact multiples of
177  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
178  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
179  */
180 #define MIN_CHUNK_SIZE		8		/* in bytes */
181 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
182 #define ZONE_RELS_THRESH	2		/* threshold number of zones */
183 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
184 
185 /*
186  * The WEIRD_ADDR is used as known text to copy into free objects to
187  * try to create deterministic failure cases if the data is accessed after
188  * free.
189  */
190 #define WEIRD_ADDR      0xdeadc0de
191 #define MAX_COPY        sizeof(weirdary)
192 #define ZERO_LENGTH_PTR	((void *)-8)
193 
194 /*
195  * Misc global malloc buckets
196  */
197 
198 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
199 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
200 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
201 
202 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
203 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
204 
205 /*
206  * Initialize the slab memory allocator.  We have to choose a zone size based
207  * on available physical memory.  We choose a zone size which is approximately
208  * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
209  * 128K.  The zone size is limited to the bounds set in slaballoc.h
210  * (typically 32K min, 128K max).
211  */
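/*
 * For example (hypothetical numbers, typical defaults): with ZoneSize
 * clamped at 128K and 4K pages, ZonePageCount is 32, ZoneMask is
 * ~(uintptr_t)0x1ffff, and ZoneLimit is capped at ZALLOC_ZONE_LIMIT.
 */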
212 static void kmeminit(void *dummy);
213 
214 char *ZeroPage;
215 
216 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)
217 
218 #ifdef INVARIANTS
219 /*
220  * If enabled any memory allocated without M_ZERO is initialized to -1.
221  */
222 static int  use_malloc_pattern;
223 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
224 		&use_malloc_pattern, 0, "");
225 #endif
226 
227 SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
228 SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
229 
230 static void
231 kmeminit(void *dummy)
232 {
233     size_t limsize;
234     int usesize;
235     int i;
236     vm_offset_t npg;
237 
238     limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
239     if (limsize > KvaSize)
240 	limsize = KvaSize;
241 
242     usesize = (int)(limsize / 1024);	/* convert to KB */
243 
244     ZoneSize = ZALLOC_MIN_ZONE_SIZE;
245     while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
246 	ZoneSize <<= 1;
247     ZoneLimit = ZoneSize / 4;
248     if (ZoneLimit > ZALLOC_ZONE_LIMIT)
249 	ZoneLimit = ZALLOC_ZONE_LIMIT;
250     ZoneMask = ~(uintptr_t)(ZoneSize - 1);
251     ZonePageCount = ZoneSize / PAGE_SIZE;
252 
253     npg = KvaSize / PAGE_SIZE;
254     kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
255 				PAGE_SIZE, M_WAITOK|M_ZERO);
256 
257     for (i = 0; i < arysize(weirdary); ++i)
258 	weirdary[i] = WEIRD_ADDR;
259 
260     ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);
261 
262     if (bootverbose)
263 	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
264 }
265 
266 /*
267  * Initialize a malloc type tracking structure.
268  */
269 void
270 malloc_init(void *data)
271 {
272     struct malloc_type *type = data;
273     size_t limsize;
274 
275     if (type->ks_magic != M_MAGIC)
276 	panic("malloc type lacks magic");
277 
278     if (type->ks_limit != 0)
279 	return;
280 
281     if (vmstats.v_page_count == 0)
282 	panic("malloc_init not allowed before vm init");
283 
284     limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
285     if (limsize > KvaSize)
286 	limsize = KvaSize;
287     type->ks_limit = limsize / 10;
288 
289     type->ks_next = kmemstatistics;
290     kmemstatistics = type;
291 }
292 
293 void
294 malloc_uninit(void *data)
295 {
296     struct malloc_type *type = data;
297     struct malloc_type *t;
298 #ifdef INVARIANTS
299     int i;
300     long ttl;
301 #endif
302 
303     if (type->ks_magic != M_MAGIC)
304 	panic("malloc type lacks magic");
305 
306     if (vmstats.v_page_count == 0)
307 	panic("malloc_uninit not allowed before vm init");
308 
309     if (type->ks_limit == 0)
310 	panic("malloc_uninit on uninitialized type");
311 
312 #ifdef SMP
313     /* Make sure that all pending kfree()s are finished. */
314     lwkt_synchronize_ipiqs("muninit");
315 #endif
316 
317 #ifdef INVARIANTS
318     /*
319      * memuse is only correct in aggregate.  Because memory can be allocated
320      * on one cpu and freed on another, individual array entries may be
321      * negative or positive (canceling each other out).
322      */
323     for (i = ttl = 0; i < ncpus; ++i)
324 	ttl += type->ks_memuse[i];
325     if (ttl) {
326 	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
327 	    ttl, type->ks_shortdesc, i);
328     }
329 #endif
330     if (type == kmemstatistics) {
331 	kmemstatistics = type->ks_next;
332     } else {
333 	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
334 	    if (t->ks_next == type) {
335 		t->ks_next = type->ks_next;
336 		break;
337 	    }
338 	}
339     }
340     type->ks_next = NULL;
341     type->ks_limit = 0;
342 }
343 
344 /*
345  * Increase the kmalloc pool limit for the specified pool.  No changes
346  * are made if the pool would shrink.
347  */
348 void
349 kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
350 {
351     if (type->ks_limit == 0)
352 	malloc_init(type);
353     if (bytes == 0)
354 	bytes = KvaSize;
355     if (type->ks_limit < bytes)
356 	type->ks_limit = bytes;
357 }
358 
359 /*
360  * Dynamically create a malloc pool.  This function is a NOP if *typep is
361  * already non-NULL.
362  */
363 void
364 kmalloc_create(struct malloc_type **typep, const char *descr)
365 {
366 	struct malloc_type *type;
367 
368 	if (*typep == NULL) {
369 		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
370 		type->ks_magic = M_MAGIC;
371 		type->ks_shortdesc = descr;
372 		malloc_init(type);
373 		*typep = type;
374 	}
375 }
376 
377 /*
378  * Destroy a dynamically created malloc pool.  This function is a NOP if
379  * the pool has already been destroyed.
380  */
381 void
382 kmalloc_destroy(struct malloc_type **typep)
383 {
384 	if (*typep != NULL) {
385 		malloc_uninit(*typep);
386 		kfree(*typep, M_TEMP);
387 		*typep = NULL;
388 	}
389 }
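/*
 * Illustrative use of the dynamic pool API above (hypothetical pool name):
 *
 *	static struct malloc_type *M_MYPOOL;
 *
 *	kmalloc_create(&M_MYPOOL, "mypool");
 *	p = kmalloc(size, M_MYPOOL, M_WAITOK);
 *	...
 *	kfree(p, M_MYPOOL);
 *	kmalloc_destroy(&M_MYPOOL);
 */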
390 
391 /*
392  * Calculate the zone index for the allocation request size and set the
393  * allocation request size to that particular zone's chunk size.
394  */
395 static __inline int
396 zoneindex(unsigned long *bytes)
397 {
398     unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
399     if (n < 128) {
400 	*bytes = n = (n + 7) & ~7;
401 	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
402     }
403     if (n < 256) {
404 	*bytes = n = (n + 15) & ~15;
405 	return(n / 16 + 7);
406     }
407     if (n < 8192) {
408 	if (n < 512) {
409 	    *bytes = n = (n + 31) & ~31;
410 	    return(n / 32 + 15);
411 	}
412 	if (n < 1024) {
413 	    *bytes = n = (n + 63) & ~63;
414 	    return(n / 64 + 23);
415 	}
416 	if (n < 2048) {
417 	    *bytes = n = (n + 127) & ~127;
418 	    return(n / 128 + 31);
419 	}
420 	if (n < 4096) {
421 	    *bytes = n = (n + 255) & ~255;
422 	    return(n / 256 + 39);
423 	}
424 	*bytes = n = (n + 511) & ~511;
425 	return(n / 512 + 47);
426     }
427 #if ZALLOC_ZONE_LIMIT > 8192
428     if (n < 16384) {
429 	*bytes = n = (n + 1023) & ~1023;
430 	return(n / 1024 + 55);
431     }
432 #endif
433 #if ZALLOC_ZONE_LIMIT > 16384
434     if (n < 32768) {
435 	*bytes = n = (n + 2047) & ~2047;
436 	return(n / 2048 + 63);
437     }
438 #endif
439     panic("Unexpected byte count %d", n);
440     return(0);
441 }
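/*
 * Worked examples for zoneindex() (illustrative only):
 *
 *	bytes = 100;  zoneindex(&bytes);	bytes -> 104,  returns 12
 *	bytes = 200;  zoneindex(&bytes);	bytes -> 208,  returns 20
 *	bytes = 3000; zoneindex(&bytes);	bytes -> 3072, returns 51
 */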
442 
443 /*
444  * kmalloc()	(SLAB ALLOCATOR)
445  *
446  *	Allocate memory via the slab allocator.  If the request is too large,
447  *	or if it is page-aligned beyond a certain size, we fall back to the
448  *	KMEM subsystem.  A SLAB tracking descriptor must be specified; use
449  *	&SlabMisc if you don't care.
450  *
451  *	M_RNOWAIT	- don't block.
452  *	M_NULLOK	- return NULL instead of blocking.
453  *	M_ZERO		- zero the returned memory.
454  *	M_USE_RESERVE	- allow greater drawdown of the free list
455  *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
456  *
457  * MPSAFE
458  */
459 void *
460 kmalloc(unsigned long size, struct malloc_type *type, int flags)
461 {
462     SLZone *z;
463     SLChunk *chunk;
464 #ifdef SMP
465     SLChunk *bchunk;
466 #endif
467     SLGlobalData *slgd;
468     struct globaldata *gd;
469     int zi;
470 #ifdef INVARIANTS
471     int i;
472 #endif
473 
474     logmemory_quick(malloc_beg);
475     gd = mycpu;
476     slgd = &gd->gd_slab;
477 
478     /*
479      * XXX silly to have this in the critical path.
480      */
481     if (type->ks_limit == 0) {
482 	crit_enter();
483 	if (type->ks_limit == 0)
484 	    malloc_init(type);
485 	crit_exit();
486     }
487     ++type->ks_calls;
488 
489     /*
490      * Handle the case where the limit is reached.  Panic if we can't return
491      * NULL.  The original malloc code looped, but this tended to
492      * simply deadlock the computer.
493      *
494      * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
495      * to determine if a more complete limit check should be done.  The
496      * actual memory use is tracked via ks_memuse[cpu].
497      */
498     while (type->ks_loosememuse >= type->ks_limit) {
499 	int i;
500 	long ttl;
501 
502 	for (i = ttl = 0; i < ncpus; ++i)
503 	    ttl += type->ks_memuse[i];
504 	type->ks_loosememuse = ttl;	/* not MP synchronized */
505 	if ((ssize_t)ttl < 0)		/* deal with occasional race */
506 		ttl = 0;
507 	if (ttl >= type->ks_limit) {
508 	    if (flags & M_NULLOK) {
509 		logmemory(malloc_end, NULL, type, size, flags);
510 		return(NULL);
511 	    }
512 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
513 	}
514     }
515 
516     /*
517      * Handle the degenerate size == 0 case.  Yes, this does happen.
518      * Return a special pointer.  This is to maintain compatibility with
519      * the original malloc implementation.  Certain devices, such as the
520      * adaptec driver, not only allocate 0 bytes, they check for NULL and
521      * also realloc() later on.  Joy.
522      */
523     if (size == 0) {
524 	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
525 	return(ZERO_LENGTH_PTR);
526     }
527 
528     /*
529      * Handle hysteresis from prior frees here in malloc().  We cannot
530      * safely manipulate the kernel_map in free() due to free() possibly
531      * being called via an IPI message or from sensitive interrupt code.
532      *
533      * NOTE: ku_pagecnt must be cleared before we free the slab or we
534      *	     might race another cpu allocating the kva and setting
535      *	     ku_pagecnt.
536      */
537     while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
538 	crit_enter();
539 	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
540 	    struct kmemusage *kup;
541 
542 	    z = slgd->FreeZones;
543 	    slgd->FreeZones = z->z_Next;
544 	    --slgd->NFreeZones;
545 	    kup = btokup(z);
546 	    kup->ku_pagecnt = 0;
547 	    kmem_slab_free(z, ZoneSize);	/* may block */
548 	    atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
549 	}
550 	crit_exit();
551     }
552 
553     /*
554      * XXX handle oversized frees that were queued from kfree().
555      */
556     while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
557 	crit_enter();
558 	if ((z = slgd->FreeOvZones) != NULL) {
559 	    vm_size_t tsize;
560 
561 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
562 	    slgd->FreeOvZones = z->z_Next;
563 	    tsize = z->z_ChunkSize;
564 	    kmem_slab_free(z, tsize);	/* may block */
565 	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
566 	}
567 	crit_exit();
568     }
569 
570     /*
571      * Handle large allocations directly.  There should not be very many of
572      * these so performance is not a big issue.
573      *
574      * The backend allocator is pretty nasty on an SMP system.  Use the
575      * slab allocator for one and two page-sized chunks even though we lose
576      * some efficiency.  XXX maybe fix mmio and the elf loader instead.
577      */
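    /*
     * e.g. (assuming 4K pages and a 16K ZALLOC_ZONE_LIMIT) a 20000 byte
     * request is rounded up to 20480 bytes (5 pages) and handed directly
     * to kmem_slab_alloc() rather than to a zone.
     */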
578     if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
579 	struct kmemusage *kup;
580 
581 	size = round_page(size);
582 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
583 	if (chunk == NULL) {
584 	    logmemory(malloc_end, NULL, type, size, flags);
585 	    return(NULL);
586 	}
587 	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
588 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
589 	flags |= M_PASSIVE_ZERO;
590 	kup = btokup(chunk);
591 	kup->ku_pagecnt = size / PAGE_SIZE;
592 	crit_enter();
593 	goto done;
594     }
595 
596     /*
597      * Attempt to allocate out of an existing zone.  First try the free list,
598      * then allocate out of unallocated space.  If we find a good zone move
599      * it to the head of the list so later allocations find it quickly
600      * (we might have thousands of zones in the list).
601      *
602      * Note: zoneindex() will panic if size is too large.
603      */
604     zi = zoneindex(&size);
605     KKASSERT(zi < NZONES);
606     crit_enter();
607 
608     if ((z = slgd->ZoneAry[zi]) != NULL) {
609 	/*
610 	 * Locate a chunk - we have to have at least one.  If this is the
611 	 * last chunk go ahead and do the work to retrieve chunks freed
612 	 * from remote cpus, and if the zone is still empty move it off
613 	 * the ZoneAry.
614 	 */
615 	if (--z->z_NFree <= 0) {
616 	    KKASSERT(z->z_NFree == 0);
617 
618 #ifdef SMP
619 	    /*
620 	     * WARNING! This code competes with other cpus.  It is ok
621 	     * for us to not drain RChunks here but we might as well, and
622 	     * it is ok if more accumulate after we're done.
623 	     *
624 	     * Set RSignal before pulling rchunks off, indicating that we
625 	     * will be moving ourselves off of the ZoneAry.  Remote ends will
626 	     * read RSignal before putting rchunks on thus interlocking
627 	     * their IPI signaling.
628 	     */
629 	    if (z->z_RChunks == NULL)
630 		atomic_swap_int(&z->z_RSignal, 1);
631 
632 	    while ((bchunk = z->z_RChunks) != NULL) {
633 		cpu_ccfence();
634 		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
635 		    *z->z_LChunksp = bchunk;
636 		    while (bchunk) {
637 			chunk_mark_free(z, bchunk);
638 			z->z_LChunksp = &bchunk->c_Next;
639 			bchunk = bchunk->c_Next;
640 			++z->z_NFree;
641 		    }
642 		    break;
643 		}
644 	    }
645 #endif
646 	    /*
647 	     * Remove from the zone list if no free chunks remain.
648 	     * Clear RSignal
649 	     */
650 	    if (z->z_NFree == 0) {
651 		slgd->ZoneAry[zi] = z->z_Next;
652 		z->z_Next = NULL;
653 	    } else {
654 		z->z_RSignal = 0;
655 	    }
656 	}
657 
658 	/*
659 	 * Fast path, we have chunks available in z_LChunks.
660 	 */
661 	chunk = z->z_LChunks;
662 	if (chunk) {
663 		chunk_mark_allocated(z, chunk);
664 		z->z_LChunks = chunk->c_Next;
665 		if (z->z_LChunks == NULL)
666 			z->z_LChunksp = &z->z_LChunks;
667 		goto done;
668 	}
669 
670 	/*
671 	 * No chunks are available in LChunks, the free chunk MUST be
672 	 * in the never-before-used memory area, controlled by UIndex.
673 	 *
674 	 * The consequences are very serious if our zone got corrupted so
675 	 * we use an explicit panic rather than a KASSERT.
676 	 */
677 	if (z->z_UIndex + 1 != z->z_NMax)
678 	    ++z->z_UIndex;
679 	else
680 	    z->z_UIndex = 0;
681 
682 	if (z->z_UIndex == z->z_UEndIndex)
683 	    panic("slaballoc: corrupted zone");
684 
685 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
686 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
687 	    flags &= ~M_ZERO;
688 	    flags |= M_PASSIVE_ZERO;
689 	}
690 	chunk_mark_allocated(z, chunk);
691 	goto done;
692     }
693 
694     /*
695      * If all zones are exhausted we need to allocate a new zone for this
696      * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
697      * UAlloc use above with regard to M_ZERO.  Note that when we are reusing
698      * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
699      * we do not pre-zero it because we do not want to mess up the L1 cache.
700      *
701      * At least one subsystem, the tty code (see CROUND) expects power-of-2
702      * allocations to be power-of-2 aligned.  We maintain compatibility by
703      * adjusting the base offset below.
704      */
705     {
706 	int off;
707 	struct kmemusage *kup;
708 
709 	if ((z = slgd->FreeZones) != NULL) {
710 	    slgd->FreeZones = z->z_Next;
711 	    --slgd->NFreeZones;
712 	    bzero(z, sizeof(SLZone));
713 	    z->z_Flags |= SLZF_UNOTZEROD;
714 	} else {
715 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
716 	    if (z == NULL)
717 		goto fail;
718 	    atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
719 	}
720 
721 	/*
722 	 * How big is the base structure?
723 	 */
724 #if defined(INVARIANTS)
725 	/*
726 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
727 	 * complicated, so we use a conservative estimate instead.
728 	 */
729 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
730 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
731 #else
732 	off = sizeof(SLZone);
733 #endif
734 
735 	/*
736 	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
737 	 * Otherwise just 8-byte align the data.
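	 *
	 * Note: ((size | (size - 1)) + 1 == (size << 1)) holds exactly when
	 * size is a non-zero power of 2, which is the test used below.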
738 	 */
739 	if ((size | (size - 1)) + 1 == (size << 1))
740 	    off = (off + size - 1) & ~(size - 1);
741 	else
742 	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
743 	z->z_Magic = ZALLOC_SLAB_MAGIC;
744 	z->z_ZoneIndex = zi;
745 	z->z_NMax = (ZoneSize - off) / size;
746 	z->z_NFree = z->z_NMax - 1;
747 	z->z_BasePtr = (char *)z + off;
748 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
749 	z->z_ChunkSize = size;
750 	z->z_CpuGd = gd;
751 	z->z_Cpu = gd->gd_cpuid;
752 	z->z_LChunksp = &z->z_LChunks;
753 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
754 	z->z_Next = slgd->ZoneAry[zi];
755 	slgd->ZoneAry[zi] = z;
756 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
757 	    flags &= ~M_ZERO;	/* already zero'd */
758 	    flags |= M_PASSIVE_ZERO;
759 	}
760 	kup = btokup(z);
761 	kup->ku_pagecnt = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
762 	chunk_mark_allocated(z, chunk);
763 
764 	/*
765 	 * Slide the base index for initial allocations out of the next
766 	 * zone we create so we do not over-weight the lower part of the
767 	 * cpu memory caches.
768 	 */
769 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
770 				& (ZALLOC_MAX_ZONE_SIZE - 1);
771     }
772 
773 done:
774     ++type->ks_inuse[gd->gd_cpuid];
775     type->ks_memuse[gd->gd_cpuid] += size;
776     type->ks_loosememuse += size;	/* not MP synchronized */
777     crit_exit();
778 
779     if (flags & M_ZERO)
780 	bzero(chunk, size);
781 #ifdef INVARIANTS
782     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
783 	if (use_malloc_pattern) {
784 	    for (i = 0; i < size; i += sizeof(int)) {
785 		*(int *)((char *)chunk + i) = -1;
786 	    }
787 	}
788 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
789     }
790 #endif
791     logmemory(malloc_end, chunk, type, size, flags);
792     return(chunk);
793 fail:
794     crit_exit();
795     logmemory(malloc_end, NULL, type, size, flags);
796     return(NULL);
797 }
798 
799 /*
800  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
801  *
802  * Generally speaking this routine is not called very often and we do
803  * not attempt to optimize it beyond reusing the same pointer if the
804  * new size fits within the chunking of the old pointer's zone.
805  */
806 void *
807 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
808 {
809     struct kmemusage *kup;
810     SLZone *z;
811     void *nptr;
812     unsigned long osize;
813 
814     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
815 
816     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
817 	return(kmalloc(size, type, flags));
818     if (size == 0) {
819 	kfree(ptr, type);
820 	return(NULL);
821     }
822 
823     /*
824      * Handle oversized allocations.  XXX we really should require that a
825      * size be passed to free() instead of this nonsense.
826      */
827     kup = btokup(ptr);
828     if (kup->ku_pagecnt > 0) {
829 	osize = kup->ku_pagecnt << PAGE_SHIFT;
830 	if (osize == round_page(size))
831 	    return(ptr);
832 	if ((nptr = kmalloc(size, type, flags)) == NULL)
833 	    return(NULL);
834 	bcopy(ptr, nptr, min(size, osize));
835 	kfree(ptr, type);
836 	return(nptr);
837     }
838 
839     /*
840      * Get the original allocation's zone.  If the new request winds up
841      * using the same chunk size we do not have to do anything.
842      */
843     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
844     kup = btokup(z);
845     KKASSERT(kup->ku_pagecnt < 0);
846     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
847 
848     /*
849      * Allocate memory for the new request size.  Note that zoneindex has
850      * already adjusted the request size to the appropriate chunk size, which
851      * should optimize our bcopy().  Then copy and return the new pointer.
852      *
853      * Resizing a non-power-of-2 allocation to a power-of-2 size does not
854      * necessarily align the result.
855      *
856      * We can only zoneindex (to align size to the chunk size) if the new
857      * size is not too large.
858      */
859     if (size < ZoneLimit) {
860 	zoneindex(&size);
861 	if (z->z_ChunkSize == size)
862 	    return(ptr);
863     }
864     if ((nptr = kmalloc(size, type, flags)) == NULL)
865 	return(NULL);
866     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
867     kfree(ptr, type);
868     return(nptr);
869 }
870 
871 /*
872  * Return the kmalloc limit for this type, in bytes.
873  */
874 long
875 kmalloc_limit(struct malloc_type *type)
876 {
877     if (type->ks_limit == 0) {
878 	crit_enter();
879 	if (type->ks_limit == 0)
880 	    malloc_init(type);
881 	crit_exit();
882     }
883     return(type->ks_limit);
884 }
885 
886 /*
887  * Allocate a copy of the specified string.
888  *
889  * (MP SAFE) (MAY BLOCK)
890  */
891 char *
892 kstrdup(const char *str, struct malloc_type *type)
893 {
894     int zlen;	/* length inclusive of terminating NUL */
895     char *nstr;
896 
897     if (str == NULL)
898 	return(NULL);
899     zlen = strlen(str) + 1;
900     nstr = kmalloc(zlen, type, M_WAITOK);
901     bcopy(str, nstr, zlen);
902     return(nstr);
903 }
904 
905 #ifdef SMP
906 /*
907  * Notify our cpu that a remote cpu has freed some chunks in a zone that
908  * we own.  Due to MP races we might no longer own the zone; use the
909  * kmemusage array to check.
910  */
911 static
912 void
913 kfree_remote(void *ptr)
914 {
915     struct kmemusage *kup;
916     SLGlobalData *slgd;
917     SLChunk *bchunk;
918     SLZone *z;
919     int nfree;
920 
921     /*
922      * Do not dereference (z) until we validate that its storage is
923      * still around.
924      */
925     slgd = &mycpu->gd_slab;
926     z = ptr;
927     kup = btokup(z);
928 
929     if (kup->ku_pagecnt == -((int)mycpuid + 1)) {	/* -1 to -(N+1) */
930 	logmemory(free_rem_beg, z, NULL, 0, 0);
931 	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
932 	KKASSERT(z->z_Cpu  == mycpu->gd_cpuid);
933 	nfree = z->z_NFree;
934 
935 	/*
936 	 * Indicate that we will no longer be off of the ZoneAry by
937 	 * clearing RSignal.
938 	 */
939 	if (z->z_RChunks)
940 	    z->z_RSignal = 0;
941 
942 	/*
943 	 * Atomically extract the bchunks list and then process it back
944 	 * into the lchunks list.  We want to append our bchunks to the
945 	 * lchunks list and not prepend since we likely do not have
946 	 * cache mastership of the related data (not that it helps since
947 	 * we are using c_Next).
948 	 */
949 	while ((bchunk = z->z_RChunks) != NULL) {
950 	    cpu_ccfence();
951 	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
952 		*z->z_LChunksp = bchunk;
953 		while (bchunk) {
954 			chunk_mark_free(z, bchunk);
955 			z->z_LChunksp = &bchunk->c_Next;
956 			bchunk = bchunk->c_Next;
957 			++z->z_NFree;
958 		}
959 		break;
960 	    }
961 	}
962 	if (z->z_NFree && nfree == 0) {
963 	    z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
964 	    slgd->ZoneAry[z->z_ZoneIndex] = z;
965 	}
966 
967 	/*
968 	 * If the zone becomes totally free, and there are other zones we
969 	 * can allocate from, move this zone to the FreeZones list.  Since
970 	 * this code can be called from an IPI callback, do *NOT* try to mess
971 	 * with kernel_map here.  Hysteresis will be performed at malloc() time.
972 	 */
973 	if (z->z_NFree == z->z_NMax &&
974 	    (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
975 	) {
976 	    struct kmemusage *kup;
977 	    SLZone **pz;
978 
979 	    for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
980 		 z != *pz;
981 		 pz = &(*pz)->z_Next) {
982 		;
983 	    }
984 	    *pz = z->z_Next;
985 	    z->z_Magic = -1;
986 	    z->z_Next = slgd->FreeZones;
987 	    slgd->FreeZones = z;
988 	    ++slgd->NFreeZones;
989 	    kup = btokup(z);
990 	    kup->ku_pagecnt = 0;
991 	}
992 	logmemory(free_rem_end, z, bchunk, 0, 0);
993     }
994 }
995 
996 #endif
997 
998 /*
999  * free (SLAB ALLOCATOR)
1000  *
1001  * Free a memory block previously allocated by malloc.  Note that we do not
1002  * attempt to update ks_loosememuse as MP races could prevent us from
1003  * checking memory limits in malloc.
1004  *
1005  * MPSAFE
1006  */
1007 void
1008 kfree(void *ptr, struct malloc_type *type)
1009 {
1010     SLZone *z;
1011     SLChunk *chunk;
1012     SLGlobalData *slgd;
1013     struct globaldata *gd;
1014     struct kmemusage *kup;
1015     unsigned long size;
1016 #ifdef SMP
1017     SLChunk *bchunk;
1018     int rsignal;
1019 #endif
1020 
1021     logmemory_quick(free_beg);
1022     gd = mycpu;
1023     slgd = &gd->gd_slab;
1024 
1025     if (ptr == NULL)
1026 	panic("trying to free NULL pointer");
1027 
1028     /*
1029      * Handle special 0-byte allocations
1030      */
1031     if (ptr == ZERO_LENGTH_PTR) {
1032 	logmemory(free_zero, ptr, type, -1, 0);
1033 	logmemory_quick(free_end);
1034 	return;
1035     }
1036 
1037     /*
1038      * Panic on bad malloc type
1039      */
1040     if (type->ks_magic != M_MAGIC)
1041 	panic("free: malloc type lacks magic");
1042 
1043     /*
1044      * Handle oversized allocations.  XXX we really should require that a
1045      * size be passed to free() instead of this nonsense.
1046      *
1047      * This code is never called via an ipi.
1048      */
1049     kup = btokup(ptr);
1050     if (kup->ku_pagecnt > 0) {
1051 	size = kup->ku_pagecnt << PAGE_SHIFT;
1052 	kup->ku_pagecnt = 0;
1053 #ifdef INVARIANTS
1054 	KKASSERT(sizeof(weirdary) <= size);
1055 	bcopy(weirdary, ptr, sizeof(weirdary));
1056 #endif
1057 	/*
1058 	 * NOTE: For oversized allocations we do not record the
1059 	 *	     originating cpu.  It gets freed on the cpu calling
1060 	 *	     kfree().  The statistics are in aggregate.
1061 	 *
1062 	 * note: XXX we have still inherited the interrupts-can't-block
1063 	 * assumption.  An interrupt thread does not bump
1064 	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
1065 	 * primarily until we can fix softupdate's assumptions about free().
1066 	 */
1067 	crit_enter();
1068 	--type->ks_inuse[gd->gd_cpuid];
1069 	type->ks_memuse[gd->gd_cpuid] -= size;
1070 	if (mycpu->gd_intr_nesting_level ||
1071 	    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
1072 	{
1073 	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
1074 	    z = (SLZone *)ptr;
1075 	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
1076 	    z->z_Next = slgd->FreeOvZones;
1077 	    z->z_ChunkSize = size;
1078 	    slgd->FreeOvZones = z;
1079 	    crit_exit();
1080 	} else {
1081 	    crit_exit();
1082 	    logmemory(free_ovsz, ptr, type, size, 0);
1083 	    kmem_slab_free(ptr, size);	/* may block */
1084 	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
1085 	}
1086 	logmemory_quick(free_end);
1087 	return;
1088     }
1089 
1090     /*
1091      * Zone case.  Figure out the zone based on the fact that it is
1092      * ZoneSize aligned.
1093      */
1094     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1095     kup = btokup(z);
1096     KKASSERT(kup->ku_pagecnt < 0);
1097     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1098 
1099     /*
1100      * If we do not own the zone then use atomic ops to free to the
1101      * remote cpu linked list and notify the target zone using a
1102      * passive message.
1103      *
1104      * The target zone cannot be deallocated while we own a chunk of it,
1105      * so the zone header's storage is stable until the very moment
1106      * we adjust z_RChunks.  After that we cannot safely dereference (z).
1107      *
1108      * (no critical section needed)
1109      */
1110     if (z->z_CpuGd != gd) {
1111 #ifdef SMP
1112 	/*
1113 	 * Making these adjustments now allows us to avoid passing (type)
1114 	 * to the remote cpu.  Note that ks_inuse/ks_memuse is being
1115 	 * adjusted on OUR cpu, not the zone cpu, but it should all still
1116 	 * sum up properly and cancel out.
1117 	 */
1118 	crit_enter();
1119 	--type->ks_inuse[gd->gd_cpuid];
1120 	type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
1121 	crit_exit();
1122 
1123 	/*
1124 	 * WARNING! This code competes with other cpus.  Once we
1125 	 *	    successfully link the chunk to RChunks the remote
1126 	 *	    cpu can rip z's storage out from under us.
1127 	 */
1128 	rsignal = z->z_RSignal;
1129 	cpu_lfence();
1130 
1131 	chunk = ptr;
1132 	for (;;) {
1133 	    bchunk = z->z_RChunks;
1134 	    cpu_ccfence();
1135 	    chunk->c_Next = bchunk;
1136 	    cpu_sfence();
1137 
1138 	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
1139 		break;
1140 	}
1141 	/* z cannot be dereferenced now */
1142 
1143 	/*
1144 	 * We have to signal the remote cpu if our actions will cause
1145 	 * the remote zone to be placed back on ZoneAry so it can
1146 	 * move the zone back on.
1147 	 *
1148 	 * We only need to deal with NULL->non-NULL RChunk transitions
1149 	 * and only if z_RSignal is set.  We interlock by reading rsignal
1150 	 * before adding our chunk to RChunks.  This should result in
1151 	 * virtually no IPI traffic.
1152 	 *
1153 	 * We can use a passive IPI to reduce overhead even further.
1154 	 */
1155 	if (bchunk == NULL && rsignal) {
1156 	    logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
1157 	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
1158 	}
1159 #else
1160 	panic("Corrupt SLZone");
1161 #endif
1162 	logmemory_quick(free_end);
1163 	return;
1164     }
1165 
1166     /*
1167      * kfree locally
1168      */
1169     logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);
1170 
1171     crit_enter();
1172     chunk = ptr;
1173     chunk_mark_free(z, chunk);
1174 
1175     /*
1176      * Put weird data into the memory to detect modifications after freeing,
1177      * illegal pointer use after freeing (we should fault on the odd address),
1178      * and so forth.  XXX needs more work, see the old malloc code.
1179      */
1180 #ifdef INVARIANTS
1181     if (z->z_ChunkSize < sizeof(weirdary))
1182 	bcopy(weirdary, chunk, z->z_ChunkSize);
1183     else
1184 	bcopy(weirdary, chunk, sizeof(weirdary));
1185 #endif
1186 
1187     /*
1188      * Add this free non-zero'd chunk to a linked list for reuse.  Add
1189      * to the front of the linked list so it is more likely to be
1190      * reallocated, since it is already in our L1 cache.
1191      */
1192 #ifdef INVARIANTS
1193     if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
1194 	panic("BADFREE %p", chunk);
1195 #endif
1196     chunk->c_Next = z->z_LChunks;
1197     z->z_LChunks = chunk;
1198     if (chunk->c_Next == NULL)
1199 	    z->z_LChunksp = &chunk->c_Next;
1200 
1201 #ifdef INVARIANTS
1202     if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
1203 	panic("BADFREE2");
1204 #endif
1205 
1206     /*
1207      * Bump the number of free chunks.  If it becomes non-zero the zone
1208      * must be added back onto the appropriate list.
1209      */
1210     if (z->z_NFree++ == 0) {
1211 	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1212 	slgd->ZoneAry[z->z_ZoneIndex] = z;
1213     }
1214 
1215     --type->ks_inuse[z->z_Cpu];
1216     type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
1217 
1218     /*
1219      * If the zone becomes totally free, and there are other zones we
1220      * can allocate from, move this zone to the FreeZones list.  Since
1221      * this code can be called from an IPI callback, do *NOT* try to mess
1222      * with kernel_map here.  Hysteresis will be performed at malloc() time.
1223      */
1224     if (z->z_NFree == z->z_NMax &&
1225 	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
1226     ) {
1227 	SLZone **pz;
1228 	struct kmemusage *kup;
1229 
1230 	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
1231 	    ;
1232 	*pz = z->z_Next;
1233 	z->z_Magic = -1;
1234 	z->z_Next = slgd->FreeZones;
1235 	slgd->FreeZones = z;
1236 	++slgd->NFreeZones;
1237 	kup = btokup(z);
1238 	kup->ku_pagecnt = 0;
1239     }
1240     logmemory_quick(free_end);
1241     crit_exit();
1242 }
1243 
1244 #if defined(INVARIANTS)
1245 
1246 /*
1247  * Helper routines for sanity checks
1248  */
1249 static
1250 void
1251 chunk_mark_allocated(SLZone *z, void *chunk)
1252 {
1253     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1254     __uint32_t *bitptr;
1255 
1256     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1257     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1258 	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
1259     bitptr = &z->z_Bitmap[bitdex >> 5];
1260     bitdex &= 31;
1261     KASSERT((*bitptr & (1 << bitdex)) == 0,
1262 	    ("memory chunk %p is already allocated!", chunk));
1263     *bitptr |= 1 << bitdex;
1264 }
1265 
1266 static
1267 void
1268 chunk_mark_free(SLZone *z, void *chunk)
1269 {
1270     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1271     __uint32_t *bitptr;
1272 
1273     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1274     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1275 	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1276     bitptr = &z->z_Bitmap[bitdex >> 5];
1277     bitdex &= 31;
1278     KASSERT((*bitptr & (1 << bitdex)) != 0,
1279 	    ("memory chunk %p is already free!", chunk));
1280     *bitptr &= ~(1 << bitdex);
1281 }
1282 
1283 #endif
1284 
1285 /*
1286  * kmem_slab_alloc()
1287  *
1288  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1289  *	specified alignment.  M_* flags are expected in the flags field.
1290  *
1291  *	Alignment must be a multiple of PAGE_SIZE.
1292  *
1293  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1294  *	but when we move zalloc() over to use this function as its backend
1295  *	we will have to switch to kreserve/krelease and call reserve(0)
1296  *	after the new space is made available.
1297  *
1298  *	Interrupt code which has preempted other code is not allowed to
1299  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1300  *	non-preemptively or blocks and then runs non-preemptively, then
1301  *	it is free to use PQ_CACHE pages.
1302  */
1303 static void *
1304 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1305 {
1306     vm_size_t i;
1307     vm_offset_t addr;
1308     int count, vmflags, base_vmflags;
1309     thread_t td;
1310 
1311     size = round_page(size);
1312     addr = vm_map_min(&kernel_map);
1313 
1314     /*
1315      * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
1316      * cannot block.
1317      */
1318     if (flags & M_RNOWAIT) {
1319 	if (lwkt_trytoken(&vm_token) == 0)
1320 	    return(NULL);
1321     } else {
1322 	lwkt_gettoken(&vm_token);
1323     }
1324     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1325     crit_enter();
1326     vm_map_lock(&kernel_map);
1327     if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
1328 	vm_map_unlock(&kernel_map);
1329 	if ((flags & M_NULLOK) == 0)
1330 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1331 	vm_map_entry_release(count);
1332 	crit_exit();
1333 	lwkt_reltoken(&vm_token);
1334 	return(NULL);
1335     }
1336 
1337     /*
1338      * kernel_object maps 1:1 to kernel_map.
1339      */
1340     vm_object_reference(&kernel_object);
1341     vm_map_insert(&kernel_map, &count,
1342 		    &kernel_object, addr, addr, addr + size,
1343 		    VM_MAPTYPE_NORMAL,
1344 		    VM_PROT_ALL, VM_PROT_ALL,
1345 		    0);
1346 
1347     td = curthread;
1348 
1349     base_vmflags = 0;
1350     if (flags & M_ZERO)
1351         base_vmflags |= VM_ALLOC_ZERO;
1352     if (flags & M_USE_RESERVE)
1353 	base_vmflags |= VM_ALLOC_SYSTEM;
1354     if (flags & M_USE_INTERRUPT_RESERVE)
1355         base_vmflags |= VM_ALLOC_INTERRUPT;
1356     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
1357 	panic("kmem_slab_alloc: bad flags %08x (%p)",
1358 	      flags, ((int **)&size)[-1]);
1359     }
1360 
1361 
1362     /*
1363      * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
1364      */
1365     for (i = 0; i < size; i += PAGE_SIZE) {
1366 	vm_page_t m;
1367 
1368 	/*
1369 	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
1370 	 *
1371 	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1372 	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1373 	 * implied in this case), though I'm not sure if we really need to
1374 	 * do that.
1375 	 */
1376 	vmflags = base_vmflags;
1377 	if (flags & M_WAITOK) {
1378 	    if (td->td_preempted)
1379 		vmflags |= VM_ALLOC_SYSTEM;
1380 	    else
1381 		vmflags |= VM_ALLOC_NORMAL;
1382 	}
1383 
1384 	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1385 
1386 	/*
1387 	 * If the allocation failed we either return NULL or we retry.
1388 	 *
1389 	 * If M_WAITOK is specified we wait for more memory and retry.
1390 	 * If M_WAITOK is specified from a preemption we yield instead of
1391 	 * wait.  Livelock will not occur because the interrupt thread
1392 	 * will not be preempting anyone the second time around after the
1393 	 * yield.
1394 	 */
1395 	if (m == NULL) {
1396 	    if (flags & M_WAITOK) {
1397 		if (td->td_preempted) {
1398 		    vm_map_unlock(&kernel_map);
1399 		    lwkt_switch();
1400 		    vm_map_lock(&kernel_map);
1401 		} else {
1402 		    vm_map_unlock(&kernel_map);
1403 		    vm_wait(0);
1404 		    vm_map_lock(&kernel_map);
1405 		}
1406 		i -= PAGE_SIZE;	/* retry */
1407 		continue;
1408 	    }
1409 
1410 	    /*
1411 	     * We were unable to recover; clean up and return NULL
1412 	     *
1413 	     * (vm_token already held)
1414 	     */
1415 	    while (i != 0) {
1416 		i -= PAGE_SIZE;
1417 		m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1418 		/* page should already be busy */
1419 		vm_page_free(m);
1420 	    }
1421 	    vm_map_delete(&kernel_map, addr, addr + size, &count);
1422 	    vm_map_unlock(&kernel_map);
1423 	    vm_map_entry_release(count);
1424 	    crit_exit();
1425 	    lwkt_reltoken(&vm_token);
1426 	    return(NULL);
1427 	}
1428     }
1429 
1430     /*
1431      * Success!
1432      *
1433      * Mark the map entry as non-pageable using a routine that allows us to
1434      * populate the underlying pages.
1435      *
1436      * The pages were busied by the allocations above.
1437      */
1438     vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1439     crit_exit();
1440 
1441     /*
1442      * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1443      */
1444     lwkt_gettoken(&vm_token);
1445     for (i = 0; i < size; i += PAGE_SIZE) {
1446 	vm_page_t m;
1447 
1448 	m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1449 	m->valid = VM_PAGE_BITS_ALL;
1450 	/* page should already be busy */
1451 	vm_page_wire(m);
1452 	vm_page_wakeup(m);
1453 	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
1454 	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1455 	    bzero((char *)addr + i, PAGE_SIZE);
1456 	vm_page_flag_clear(m, PG_ZERO);
1457 	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
1458 	vm_page_flag_set(m, PG_REFERENCED);
1459     }
1460     lwkt_reltoken(&vm_token);
1461     vm_map_unlock(&kernel_map);
1462     vm_map_entry_release(count);
1463     lwkt_reltoken(&vm_token);
1464     return((void *)addr);
1465 }
1466 
1467 /*
1468  * kmem_slab_free()
1469  */
1470 static void
1471 kmem_slab_free(void *ptr, vm_size_t size)
1472 {
1473     crit_enter();
1474     lwkt_gettoken(&vm_token);
1475     vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1476     lwkt_reltoken(&vm_token);
1477     crit_exit();
1478 }
1479 
1480