xref: /dragonfly/sys/kern/kern_slaballoc.c (revision a3127495)
1 /*
2  * (MPSAFE)
3  *
4  * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
5  *
6  * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
7  *
8  * This code is derived from software contributed to The DragonFly Project
9  * by Matthew Dillon <dillon@backplane.com>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in
19  *    the documentation and/or other materials provided with the
20  *    distribution.
21  * 3. Neither the name of The DragonFly Project nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific, prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * This module implements a slab allocator drop-in replacement for the
39  * kernel malloc().
40  *
41  * A slab allocator reserves a ZONE for each chunk size, then lays the
42  * chunks out in an array within the zone.  Allocation and deallocation
43  * are nearly instantaneous, and fragmentation/overhead losses are limited
44  * to a fixed worst-case amount.
45  *
46  * The downside of this slab implementation is the reserved VM: the zone size
47  * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
48  * In a kernel implementation all this memory will be physical so
49  * the zone size is adjusted downward on machines with less physical
50  * memory.  The upside is that overhead is bounded... this is the *worst*
51  * case overhead.
52  *
53  * Slab management is done on a per-cpu basis and no locking or mutexes
54  * are required, only a critical section.  When one cpu frees memory
55  * belonging to another cpu's slab manager an asynchronous IPI message
56  * will be queued to execute the operation.   In addition, both the
57  * high level slab allocator and the low level zone allocator optimize
58  * M_ZERO requests, and the slab allocator does not have to pre-initialize
59  * the linked list of chunks.
60  *
61  * XXX Balancing is needed between cpus.  Balance will be handled through
62  * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63  *
64  * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65  * the new zone should be restricted to M_USE_RESERVE requests only.
66  *
67  *	Alloc Size	Chunking        Number of zones
68  *	0-127		8		16
69  *	128-255		16		8
70  *	256-511		32		8
71  *	512-1023	64		8
72  *	1024-2047	128		8
73  *	2048-4095	256		8
74  *	4096-8191	512		8
75  *	8192-16383	1024		8
76  *	16384-32767	2048		8
77  *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
78  *
79  *	Allocations >= ZoneLimit go directly to kmem.
80  *	(n * PAGE_SIZE, n > 2) allocations go directly to kmem.
81  *
82  * Alignment properties:
83  * - All power-of-2 sized allocations are power-of-2 aligned.
84  * - Allocations with M_POWEROF2 are power-of-2 aligned to the nearest
85  *   power-of-2 round-up of 'size'.
86  * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
87  *   above table 'Chunking' column).
88  *
89  *			API REQUIREMENTS AND SIDE EFFECTS
90  *
91  *    To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
92  *    have remained compatible with the following API requirements:
93  *
94  *    + malloc(0) is allowed and returns non-NULL (ahc driver)
95  *    + ability to allocate arbitrarily large chunks of memory
96  */
97 
98 #include "opt_vm.h"
99 
100 #include <sys/param.h>
101 #include <sys/systm.h>
102 #include <sys/kernel.h>
103 #include <sys/slaballoc.h>
104 #include <sys/mbuf.h>
105 #include <sys/vmmeter.h>
106 #include <sys/lock.h>
107 #include <sys/thread.h>
108 #include <sys/globaldata.h>
109 #include <sys/sysctl.h>
110 #include <sys/ktr.h>
111 
112 #include <vm/vm.h>
113 #include <vm/vm_param.h>
114 #include <vm/vm_kern.h>
115 #include <vm/vm_extern.h>
116 #include <vm/vm_object.h>
117 #include <vm/pmap.h>
118 #include <vm/vm_map.h>
119 #include <vm/vm_page.h>
120 #include <vm/vm_pageout.h>
121 
122 #include <machine/cpu.h>
123 
124 #include <sys/thread2.h>
125 #include <vm/vm_page2.h>
126 
127 #define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)
128 
129 #define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
130 #define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags
131 
132 #if !defined(KTR_MEMORY)
133 #define KTR_MEMORY	KTR_ALL
134 #endif
135 KTR_INFO_MASTER(memory);
136 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
137 KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
138 KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
139 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
140 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
141 KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
142 KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
143 KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
144 KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
145 KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
146 KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");
147 
148 #define logmemory(name, ptr, type, size, flags)				\
149 	KTR_LOG(memory_ ## name, ptr, type, size, flags)
150 #define logmemory_quick(name)						\
151 	KTR_LOG(memory_ ## name)
152 
153 /*
154  * Fixed globals (not per-cpu)
155  */
156 static int ZoneSize;
157 static int ZoneLimit;
158 static int ZonePageCount;
159 static uintptr_t ZoneMask;
160 static int ZoneBigAlloc;		/* in KB */
161 static int ZoneGenAlloc;		/* in KB */
162 struct malloc_type *kmemstatistics;	/* exported to vmstat */
163 #ifdef INVARIANTS
164 static int32_t weirdary[16];
165 #endif
166 
167 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
168 static void kmem_slab_free(void *ptr, vm_size_t bytes);
169 
170 #if defined(INVARIANTS)
171 static void chunk_mark_allocated(SLZone *z, void *chunk);
172 static void chunk_mark_free(SLZone *z, void *chunk);
173 #else
174 #define chunk_mark_allocated(z, chunk)
175 #define chunk_mark_free(z, chunk)
176 #endif
177 
178 /*
179  * Misc constants.  Note that allocations that are exact multiples of
180  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
181  */
182 #define ZONE_RELS_THRESH	32		/* threshold number of zones */
183 
184 #ifdef INVARIANTS
185 /*
186  * The WEIRD_ADDR is used as known text to copy into free objects to
187  * try to create deterministic failure cases if the data is accessed after
188  * free.
189  */
190 #define WEIRD_ADDR      0xdeadc0de
191 #endif
192 #define ZERO_LENGTH_PTR	((void *)-8)
193 
194 /*
195  * Misc global malloc buckets
196  */
197 
198 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
199 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
200 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
201 MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");
202 
203 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
204 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
205 
206 /*
207  * Initialize the slab memory allocator.  We have to choose a zone size based
208  * on available physical memory.  We choose a zone size which is approximately
209  * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
210  * 128K.  The zone size is limited to the bounds set in slaballoc.h
211  * (typically 32K min, 128K max).
212  */
213 static void kmeminit(void *dummy);
214 
215 char *ZeroPage;
216 
217 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
218 
219 #ifdef INVARIANTS
220 /*
221  * If enabled, any memory allocated without M_ZERO is initialized to -1.
222  */
223 static int  use_malloc_pattern;
224 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
225     &use_malloc_pattern, 0,
226     "Initialize memory to -1 if M_ZERO not specified");
227 #endif
228 
229 static int ZoneRelsThresh = ZONE_RELS_THRESH;
230 SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
231 SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
232 SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
233 static long SlabsAllocated;
234 static long SlabsFreed;
235 SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD,
236 	    &SlabsAllocated, 0, "");
237 SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD,
238 	    &SlabsFreed, 0, "");
239 static int SlabFreeToTail;
240 SYSCTL_INT(_kern, OID_AUTO, slab_freetotail, CTLFLAG_RW,
241 	    &SlabFreeToTail, 0, "");
242 
243 static struct spinlock kmemstat_spin =
244 			SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
245 
246 /*
247  * Returns the kernel memory size limit for the purposes of initializing
248  * various subsystem caches.  The smaller of available memory and the KVM
249  * memory space is returned.
250  *
251  * The size in megabytes is returned.
252  */
253 size_t
254 kmem_lim_size(void)
255 {
256     size_t limsize;
257 
258     limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
259     if (limsize > KvaSize)
260 	limsize = KvaSize;
261     return (limsize / (1024 * 1024));
262 }
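
/*
 * Illustrative example (not part of the original source): on a machine with
 * 8GB of physical memory and a larger KVM space, kmem_lim_size() returns
 * min(8GB, KvaSize) expressed in megabytes, i.e. 8192.  The figure is
 * hypothetical and only demonstrates the min(physical, KVM) calculation above.
 */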
263 
264 static void
265 kmeminit(void *dummy)
266 {
267     size_t limsize;
268     int usesize;
269 #ifdef INVARIANTS
270     int i;
271 #endif
272 
273     limsize = kmem_lim_size();
274     usesize = (int)(limsize * 1024);	/* convert to KB */
275 
276     /*
277      * If the machine has a large KVM space and more than 8G of ram,
278      * double the zone release threshold to reduce SMP invalidations.
279      * If more than 16G of ram, do it again.
280      *
281      * The BIOS eats a little ram so add some slop.  We want 8G worth of
282      * memory sticks to trigger the first adjustment.
283      */
284     if (ZoneRelsThresh == ZONE_RELS_THRESH) {
285 	    if (limsize >= 7 * 1024)
286 		    ZoneRelsThresh *= 2;
287 	    if (limsize >= 15 * 1024)
288 		    ZoneRelsThresh *= 2;
289     }
290 
291     /*
292      * Calculate the zone size.  This typically calculates to
293      * ZALLOC_MAX_ZONE_SIZE
294      */
295     ZoneSize = ZALLOC_MIN_ZONE_SIZE;
296     while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
297 	ZoneSize <<= 1;
298     ZoneLimit = ZoneSize / 4;
299     if (ZoneLimit > ZALLOC_ZONE_LIMIT)
300 	ZoneLimit = ZALLOC_ZONE_LIMIT;
301     ZoneMask = ~(uintptr_t)(ZoneSize - 1);
302     ZonePageCount = ZoneSize / PAGE_SIZE;
303 
304 #ifdef INVARIANTS
305     for (i = 0; i < NELEM(weirdary); ++i)
306 	weirdary[i] = WEIRD_ADDR;
307 #endif
308 
309     ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);
310 
311     if (bootverbose)
312 	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
313 }
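
/*
 * Worked example (illustrative, assuming the typical 32K..128K zone size
 * bounds noted above): with 256MB of ram, limsize is 256 and usesize is
 * 262144 (KB).  ZoneSize doubles 32K -> 64K -> 128K and stops at the 128K
 * maximum, giving ZonePageCount = 32 with 4K pages.
 */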
314 
315 /*
316  * (low level) Initialize slab-related elements in the globaldata structure.
317  *
318  * Occurs after kmeminit().
319  */
320 void
321 slab_gdinit(globaldata_t gd)
322 {
323 	SLGlobalData *slgd;
324 	int i;
325 
326 	slgd = &gd->gd_slab;
327 	for (i = 0; i < NZONES; ++i)
328 		TAILQ_INIT(&slgd->ZoneAry[i]);
329 	TAILQ_INIT(&slgd->FreeZones);
330 	TAILQ_INIT(&slgd->FreeOvZones);
331 }
332 
333 /*
334  * Initialize a malloc type tracking structure.
335  */
336 void
337 malloc_init(void *data)
338 {
339     struct malloc_type *type = data;
340     size_t limsize;
341 
342     if (type->ks_magic != M_MAGIC)
343 	panic("malloc type lacks magic");
344 
345     if (type->ks_limit != 0)
346 	return;
347 
348     if (vmstats.v_page_count == 0)
349 	panic("malloc_init not allowed before vm init");
350 
351     limsize = kmem_lim_size() * (1024 * 1024);
352     type->ks_limit = limsize / 10;
353 
354     spin_lock(&kmemstat_spin);
355     type->ks_next = kmemstatistics;
356     kmemstatistics = type;
357     spin_unlock(&kmemstat_spin);
358 }
359 
360 void
361 malloc_uninit(void *data)
362 {
363     struct malloc_type *type = data;
364     struct malloc_type *t;
365 #ifdef INVARIANTS
366     int i;
367     long ttl;
368 #endif
369 
370     if (type->ks_magic != M_MAGIC)
371 	panic("malloc type lacks magic");
372 
373     if (vmstats.v_page_count == 0)
374 	panic("malloc_uninit not allowed before vm init");
375 
376     if (type->ks_limit == 0)
377 	panic("malloc_uninit on uninitialized type");
378 
379     /* Make sure that all pending kfree()s are finished. */
380     lwkt_synchronize_ipiqs("muninit");
381 
382 #ifdef INVARIANTS
383     /*
384      * memuse is only correct in aggregation.  Due to memory being allocated
385      * on one cpu and freed on another, individual array entries may be
386      * negative or positive (canceling each other out).
387      */
388     for (i = ttl = 0; i < ncpus; ++i)
389 	ttl += type->ks_use[i].memuse;
390     if (ttl) {
391 	kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
392 	    ttl, type->ks_shortdesc);
393     }
394 #endif
395     spin_lock(&kmemstat_spin);
396     if (type == kmemstatistics) {
397 	kmemstatistics = type->ks_next;
398     } else {
399 	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
400 	    if (t->ks_next == type) {
401 		t->ks_next = type->ks_next;
402 		break;
403 	    }
404 	}
405     }
406     type->ks_next = NULL;
407     type->ks_limit = 0;
408     spin_unlock(&kmemstat_spin);
409 }
410 
411 /*
412  * Increase the kmalloc pool limit for the specified pool.  No changes
413  * are made if the pool would shrink.
414  */
415 void
416 kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
417 {
418     if (type->ks_limit == 0)
419 	malloc_init(type);
420     if (bytes == 0)
421 	bytes = KvaSize;
422     if (type->ks_limit < bytes)
423 	type->ks_limit = bytes;
424 }
425 
426 void
427 kmalloc_set_unlimited(struct malloc_type *type)
428 {
429     type->ks_limit = kmem_lim_size() * (1024 * 1024);
430 }
431 
432 /*
433  * Dynamically create a malloc pool.  This function is a NOP if *typep is
434  * already non-NULL.
435  */
436 void
437 kmalloc_create(struct malloc_type **typep, const char *descr)
438 {
439 	struct malloc_type *type;
440 
441 	if (*typep == NULL) {
442 		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
443 		type->ks_magic = M_MAGIC;
444 		type->ks_shortdesc = descr;
445 		malloc_init(type);
446 		*typep = type;
447 	}
448 }
449 
450 /*
451  * Destroy a dynamically created malloc pool.  This function is a NOP if
452  * the pool has already been destroyed.
453  */
454 void
455 kmalloc_destroy(struct malloc_type **typep)
456 {
457 	if (*typep != NULL) {
458 		malloc_uninit(*typep);
459 		kfree(*typep, M_TEMP);
460 		*typep = NULL;
461 	}
462 }
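
/*
 * Illustrative usage sketch (not part of the original source).  A subsystem
 * using a dynamically created pool might look like the following; 'mypool'
 * and 'softc' are hypothetical names:
 *
 *	static struct malloc_type *mypool;
 *
 *	kmalloc_create(&mypool, "example pool");
 *	softc = kmalloc(sizeof(*softc), mypool, M_WAITOK | M_ZERO);
 *	...
 *	kfree(softc, mypool);
 *	kmalloc_destroy(&mypool);
 */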
463 
464 /*
465  * Calculate the zone index for the allocation request size and set the
466  * allocation request size to that particular zone's chunk size.
467  */
468 static __inline int
469 zoneindex(unsigned long *bytes, unsigned long *align)
470 {
471     unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
472 
473     if (n < 128) {
474 	*bytes = n = (n + 7) & ~7;
475 	*align = 8;
476 	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
477     }
478     if (n < 256) {
479 	*bytes = n = (n + 15) & ~15;
480 	*align = 16;
481 	return(n / 16 + 7);
482     }
483     if (n < 8192) {
484 	if (n < 512) {
485 	    *bytes = n = (n + 31) & ~31;
486 	    *align = 32;
487 	    return(n / 32 + 15);
488 	}
489 	if (n < 1024) {
490 	    *bytes = n = (n + 63) & ~63;
491 	    *align = 64;
492 	    return(n / 64 + 23);
493 	}
494 	if (n < 2048) {
495 	    *bytes = n = (n + 127) & ~127;
496 	    *align = 128;
497 	    return(n / 128 + 31);
498 	}
499 	if (n < 4096) {
500 	    *bytes = n = (n + 255) & ~255;
501 	    *align = 256;
502 	    return(n / 256 + 39);
503 	}
504 	*bytes = n = (n + 511) & ~511;
505 	*align = 512;
506 	return(n / 512 + 47);
507     }
508 #if ZALLOC_ZONE_LIMIT > 8192
509     if (n < 16384) {
510 	*bytes = n = (n + 1023) & ~1023;
511 	*align = 1024;
512 	return(n / 1024 + 55);
513     }
514 #endif
515 #if ZALLOC_ZONE_LIMIT > 16384
516     if (n < 32768) {
517 	*bytes = n = (n + 2047) & ~2047;
518 	*align = 2048;
519 	return(n / 2048 + 63);
520     }
521 #endif
522     panic("Unexpected byte count %d", n);
523     return(0);
524 }
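
/*
 * Worked examples for zoneindex() (illustrative): a request for 100 bytes
 * is rounded up to 104 (8 byte chunking) and maps to zone index 12; a
 * request for 700 bytes is rounded up to 704 (64 byte chunking) and maps
 * to zone index 34.  In both cases *bytes is updated to the chunk size
 * and *align to the chunking granularity.
 */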
525 
526 static __inline void
527 clean_zone_rchunks(SLZone *z)
528 {
529     SLChunk *bchunk;
530 
531     while ((bchunk = z->z_RChunks) != NULL) {
532 	cpu_ccfence();
533 	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
534 	    *z->z_LChunksp = bchunk;
535 	    while (bchunk) {
536 		chunk_mark_free(z, bchunk);
537 		z->z_LChunksp = &bchunk->c_Next;
538 		bchunk = bchunk->c_Next;
539 		++z->z_NFree;
540 	    }
541 	    break;
542 	}
543 	/* retry */
544     }
545 }
546 
547 /*
548  * If the zone becomes totally free and is not the only zone listed for a
549  * chunk size, we move it to the FreeZones list.  We always leave at least
550  * one zone per chunk size listed, even if it is freeable.
551  *
552  * Do not move the zone if there is an IPI in flight (z_RCount != 0),
553  * otherwise MP races can result in our free_remote code accessing a
554  * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
555  * so one has to test both z_NFree and z_RCount.
556  *
557  * Since this code can be called from an IPI callback, do *NOT* try to mess
558  * with kernel_map here.  Hysteresis will be performed at kmalloc() time.
559  */
560 static __inline SLZone *
561 check_zone_free(SLGlobalData *slgd, SLZone *z)
562 {
563     SLZone *znext;
564 
565     znext = TAILQ_NEXT(z, z_Entry);
566     if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
567 	(TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) {
568 	int *kup;
569 
570 	TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
571 
572 	z->z_Magic = -1;
573 	TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
574 	++slgd->NFreeZones;
575 	kup = btokup(z);
576 	*kup = 0;
577     }
578     return znext;
579 }
580 
581 #ifdef SLAB_DEBUG
582 /*
583  * Used to debug memory corruption issues.  Record up to (typically 32)
584  * allocation sources for this zone (for a particular chunk size).
585  */
586 
587 static void
588 slab_record_source(SLZone *z, const char *file, int line)
589 {
590     int i;
591     int b = line & (SLAB_DEBUG_ENTRIES - 1);
592 
593     i = b;
594     do {
595 	if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
596 		return;
597 	if (z->z_Sources[i].file == NULL)
598 		break;
599 	i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
600     } while (i != b);
601     z->z_Sources[i].file = file;
602     z->z_Sources[i].line = line;
603 }
604 
605 #endif
606 
607 static __inline unsigned long
608 powerof2_size(unsigned long size)
609 {
610 	int i;
611 
612 	if (size == 0 || powerof2(size))
613 		return size;
614 
615 	i = flsl(size);
616 	return (1UL << i);
617 }
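
/*
 * Examples (illustrative): powerof2_size(3000) returns 4096,
 * powerof2_size(4096) returns 4096 unchanged, and powerof2_size(0)
 * returns 0.  This is the rounding applied when M_POWEROF2 is passed
 * to kmalloc().
 */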
618 
619 /*
620  * kmalloc()	(SLAB ALLOCATOR)
621  *
622  *	Allocate memory via the slab allocator.  If the request is too large,
623  *	or if it is page-aligned beyond a certain size, we fall back to the
624  *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
625  *	&SlabMisc if you don't care.
626  *
627  *	M_RNOWAIT	- don't block.
628  *	M_NULLOK	- return NULL instead of blocking.
629  *	M_ZERO		- zero the returned memory.
630  *	M_USE_RESERVE	- allow greater drawdown of the free list
631  *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
632  *	M_POWEROF2	- round size up to the nearest power of 2
633  *
634  * MPSAFE
635  */
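
/*
 * Illustrative flag usage (sketch only, hypothetical variable names):
 *
 *	p = kmalloc(sizeof(*p), M_DEVBUF, M_WAITOK | M_ZERO);
 *
 *	q = kmalloc(len, M_TEMP, M_RNOWAIT | M_NULLOK);
 *	if (q == NULL)
 *		return (ENOMEM);
 */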
636 
637 #ifdef SLAB_DEBUG
638 void *
639 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
640 	      const char *file, int line)
641 #else
642 void *
643 kmalloc(unsigned long size, struct malloc_type *type, int flags)
644 #endif
645 {
646     SLZone *z;
647     SLChunk *chunk;
648     SLGlobalData *slgd;
649     struct globaldata *gd;
650     unsigned long align;
651     int zi;
652 #ifdef INVARIANTS
653     int i;
654 #endif
655 
656     logmemory_quick(malloc_beg);
657     gd = mycpu;
658     slgd = &gd->gd_slab;
659 
660     /*
661      * XXX silly to have this in the critical path.
662      */
663     if (type->ks_limit == 0) {
664 	crit_enter();
665 	malloc_init(type);
666 	crit_exit();
667     }
668     ++type->ks_use[gd->gd_cpuid].calls;
669 
670     if (flags & M_POWEROF2)
671 	size = powerof2_size(size);
672 
673     /*
674      * Handle the case where the limit is reached.  Panic if we can't return
675      * NULL.  The original malloc code looped, but this tended to
676      * simply deadlock the computer.
677      *
678      * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
679      * to determine if a more complete limit check should be done.  The
680      * actual memory use is tracked via ks_use[cpu].memuse.
681      */
682     while (type->ks_loosememuse >= type->ks_limit) {
683 	int i;
684 	long ttl;
685 
686 	for (i = ttl = 0; i < ncpus; ++i)
687 	    ttl += type->ks_use[i].memuse;
688 	type->ks_loosememuse = ttl;	/* not MP synchronized */
689 	if ((ssize_t)ttl < 0)		/* deal with occasional race */
690 		ttl = 0;
691 	if (ttl >= type->ks_limit) {
692 	    if (flags & M_NULLOK) {
693 		logmemory(malloc_end, NULL, type, size, flags);
694 		return(NULL);
695 	    }
696 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
697 	}
698     }
699 
700     /*
701      * Handle the degenerate size == 0 case.  Yes, this does happen.
702      * Return a special pointer.  This is to maintain compatibility with
703      * the original malloc implementation.  Certain devices, such as the
704      * adaptec driver, not only allocate 0 bytes, they check for NULL and
705      * also realloc() later on.  Joy.
706      */
707     if (size == 0) {
708 	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
709 	return(ZERO_LENGTH_PTR);
710     }
711 
712     /*
713      * Handle hysteresis from prior frees here in malloc().  We cannot
714      * safely manipulate the kernel_map in free() due to free() possibly
715      * being called via an IPI message or from sensitive interrupt code.
716      *
717      * NOTE: ku_pagecnt must be cleared before we free the slab or we
718      *	     might race another cpu allocating the kva and setting
719      *	     ku_pagecnt.
720      */
721     while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
722 	crit_enter();
723 	if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
724 	    int *kup;
725 
726 	    z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
727 	    KKASSERT(z != NULL);
728 	    TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
729 	    --slgd->NFreeZones;
730 	    kup = btokup(z);
731 	    *kup = 0;
732 	    kmem_slab_free(z, ZoneSize);	/* may block */
733 	    atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
734 	}
735 	crit_exit();
736     }
737 
738     /*
739      * XXX handle oversized frees that were queued from kfree().
740      */
741     while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
742 	crit_enter();
743 	if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
744 	    vm_size_t tsize;
745 
746 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
747 	    TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
748 	    tsize = z->z_ChunkSize;
749 	    kmem_slab_free(z, tsize);	/* may block */
750 	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
751 	}
752 	crit_exit();
753     }
754 
755     /*
756      * Handle large allocations directly.  There should not be very many of
757      * these so performance is not a big issue.
758      *
759      * The backend allocator is pretty nasty on an SMP system.  Use the
760      * slab allocator for one and two page-sized chunks even though we lose
761      * some efficiency.  XXX maybe fix mmio and the elf loader instead.
762      */
763     if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
764 	int *kup;
765 
766 	size = round_page(size);
767 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
768 	if (chunk == NULL) {
769 	    logmemory(malloc_end, NULL, type, size, flags);
770 	    return(NULL);
771 	}
772 	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
773 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
774 	flags |= M_PASSIVE_ZERO;
775 	kup = btokup(chunk);
776 	*kup = size / PAGE_SIZE;
777 	crit_enter();
778 	goto done;
779     }
780 
781     /*
782      * Attempt to allocate out of an existing zone.  First try the free list,
783      * then allocate out of unallocated space.  If we find a good zone move
784      * it to the head of the list so later allocations find it quickly
785      * (we might have thousands of zones in the list).
786      *
787      * Note: zoneindex() will panic if size is too large.
788      */
789     zi = zoneindex(&size, &align);
790     KKASSERT(zi < NZONES);
791     crit_enter();
792 
793     if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
794 	/*
795 	 * Locate a chunk - we have to have at least one.  If this is the
796 	 * last chunk go ahead and do the work to retrieve chunks freed
797 	 * from remote cpus, and if the zone is still empty move it off
798 	 * the ZoneAry.
799 	 */
800 	if (--z->z_NFree <= 0) {
801 	    KKASSERT(z->z_NFree == 0);
802 
803 	    /*
804 	     * WARNING! This code competes with other cpus.  It is ok
805 	     * for us to not drain RChunks here but we might as well, and
806 	     * it is ok if more accumulate after we're done.
807 	     *
808 	     * Set RSignal before pulling rchunks off, indicating that we
809 	     * will be moving ourselves off of the ZoneAry.  Remote ends will
810 	     * read RSignal before putting rchunks on thus interlocking
811 	     * their IPI signaling.
812 	     */
813 	    if (z->z_RChunks == NULL)
814 		atomic_swap_int(&z->z_RSignal, 1);
815 
816 	    clean_zone_rchunks(z);
817 
818 	    /*
819 	     * Remove from the zone list if no free chunks remain.
820 	     * Clear RSignal
821 	     */
822 	    if (z->z_NFree == 0) {
823 		TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
824 	    } else {
825 		z->z_RSignal = 0;
826 	    }
827 	}
828 
829 	/*
830 	 * Fast path, we have chunks available in z_LChunks.
831 	 */
832 	chunk = z->z_LChunks;
833 	if (chunk) {
834 		chunk_mark_allocated(z, chunk);
835 		z->z_LChunks = chunk->c_Next;
836 		if (z->z_LChunks == NULL)
837 			z->z_LChunksp = &z->z_LChunks;
838 #ifdef SLAB_DEBUG
839 		slab_record_source(z, file, line);
840 #endif
841 		goto done;
842 	}
843 
844 	/*
845 	 * No chunks are available in LChunks, so the free chunk MUST be
846 	 * in the never-before-used memory area, controlled by UIndex.
847 	 *
848 	 * The consequences are very serious if our zone got corrupted so
849 	 * we use an explicit panic rather than a KASSERT.
850 	 */
851 	if (z->z_UIndex + 1 != z->z_NMax)
852 	    ++z->z_UIndex;
853 	else
854 	    z->z_UIndex = 0;
855 
856 	if (z->z_UIndex == z->z_UEndIndex)
857 	    panic("slaballoc: corrupted zone");
858 
859 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
860 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
861 	    flags &= ~M_ZERO;
862 	    flags |= M_PASSIVE_ZERO;
863 	}
864 	chunk_mark_allocated(z, chunk);
865 #ifdef SLAB_DEBUG
866 	slab_record_source(z, file, line);
867 #endif
868 	goto done;
869     }
870 
871     /*
872      * If all zones are exhausted we need to allocate a new zone for this
873      * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
874      * UAlloc use above with regard to M_ZERO.  Note that when we are reusing
875      * a zone from the FreeZones list, UAlloc'd data will not be zero'd, and
876      * we do not pre-zero it because we do not want to mess up the L1 cache.
877      *
878      * At least one subsystem, the tty code (see CROUND), expects power-of-2
879      * allocations to be power-of-2 aligned.  We maintain compatibility by
880      * adjusting the base offset below.
881      */
882     {
883 	int off;
884 	int *kup;
885 
886 	if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
887 	    TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
888 	    --slgd->NFreeZones;
889 	    bzero(z, sizeof(SLZone));
890 	    z->z_Flags |= SLZF_UNOTZEROD;
891 	} else {
892 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
893 	    if (z == NULL)
894 		goto fail;
895 	    atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
896 	}
897 
898 	/*
899 	 * How big is the base structure?
900 	 */
901 #if defined(INVARIANTS)
902 	/*
903 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
904 	 * complicated, so use a simple conservative over-estimate instead.
905 	 */
906 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
907 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
908 #else
909 	off = sizeof(SLZone);
910 #endif
911 
912 	/*
913 	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
914 	 * Otherwise properly align the data according to the chunk size.
915 	 */
916 	if (powerof2(size))
917 	    align = size;
918 	off = roundup2(off, align);
919 
920 	z->z_Magic = ZALLOC_SLAB_MAGIC;
921 	z->z_ZoneIndex = zi;
922 	z->z_NMax = (ZoneSize - off) / size;
923 	z->z_NFree = z->z_NMax - 1;
924 	z->z_BasePtr = (char *)z + off;
925 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
926 	z->z_ChunkSize = size;
927 	z->z_CpuGd = gd;
928 	z->z_Cpu = gd->gd_cpuid;
929 	z->z_LChunksp = &z->z_LChunks;
930 #ifdef SLAB_DEBUG
931 	bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
932 	bzero(z->z_Sources, sizeof(z->z_Sources));
933 #endif
934 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
935 	TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
936 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
937 	    flags &= ~M_ZERO;	/* already zero'd */
938 	    flags |= M_PASSIVE_ZERO;
939 	}
940 	kup = btokup(z);
941 	*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
942 	chunk_mark_allocated(z, chunk);
943 #ifdef SLAB_DEBUG
944 	slab_record_source(z, file, line);
945 #endif
946 
947 	/*
948 	 * Slide the base index for initial allocations out of the next
949 	 * zone we create so we do not over-weight the lower part of the
950 	 * cpu memory caches.
951 	 */
952 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
953 				& (ZALLOC_MAX_ZONE_SIZE - 1);
954     }
955 
956 done:
957     ++type->ks_use[gd->gd_cpuid].inuse;
958     type->ks_use[gd->gd_cpuid].memuse += size;
959     type->ks_use[gd->gd_cpuid].loosememuse += size;
960     if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) {
961 	/* not MP synchronized */
962 	type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse;
963 	type->ks_use[gd->gd_cpuid].loosememuse = 0;
964     }
965     crit_exit();
966 
967     if (flags & M_ZERO)
968 	bzero(chunk, size);
969 #ifdef INVARIANTS
970     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
971 	if (use_malloc_pattern) {
972 	    for (i = 0; i < size; i += sizeof(int)) {
973 		*(int *)((char *)chunk + i) = -1;
974 	    }
975 	}
976 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
977     }
978 #endif
979     logmemory(malloc_end, chunk, type, size, flags);
980     return(chunk);
981 fail:
982     crit_exit();
983     logmemory(malloc_end, NULL, type, size, flags);
984     return(NULL);
985 }
986 
987 /*
988  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
989  *
990  * Generally speaking this routine is not called very often and we do
991  * not attempt to optimize it beyond reusing the same pointer if the
992  * new size fits within the chunking of the old pointer's zone.
993  */
994 #ifdef SLAB_DEBUG
995 void *
996 krealloc_debug(void *ptr, unsigned long size,
997 	       struct malloc_type *type, int flags,
998 	       const char *file, int line)
999 #else
1000 void *
1001 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
1002 #endif
1003 {
1004     unsigned long osize;
1005     unsigned long align;
1006     SLZone *z;
1007     void *nptr;
1008     int *kup;
1009 
1010     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
1011 
1012     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1013 	return(kmalloc_debug(size, type, flags, file, line));
1014     if (size == 0) {
1015 	kfree(ptr, type);
1016 	return(NULL);
1017     }
1018 
1019     /*
1020      * Handle oversized allocations.  XXX we really should require that a
1021      * size be passed to free() instead of this nonsense.
1022      */
1023     kup = btokup(ptr);
1024     if (*kup > 0) {
1025 	osize = *kup << PAGE_SHIFT;
1026 	if (osize == round_page(size))
1027 	    return(ptr);
1028 	if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
1029 	    return(NULL);
1030 	bcopy(ptr, nptr, min(size, osize));
1031 	kfree(ptr, type);
1032 	return(nptr);
1033     }
1034 
1035     /*
1036      * Get the original allocation's zone.  If the new request winds up
1037      * using the same chunk size we do not have to do anything.
1038      */
1039     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1040     kup = btokup(z);
1041     KKASSERT(*kup < 0);
1042     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1043 
1044     /*
1045      * Allocate memory for the new request size.  Note that zoneindex has
1046      * already adjusted the request size to the appropriate chunk size, which
1047      * should optimize our bcopy().  Then copy and return the new pointer.
1048      *
1049      * Resizing a non-power-of-2 allocation to a power-of-2 size does not
1050      * necessarily align the result.
1051      *
1052      * We can only zoneindex (to align size to the chunk size) if the new
1053      * size is not too large.
1054      */
1055     if (size < ZoneLimit) {
1056 	zoneindex(&size, &align);
1057 	if (z->z_ChunkSize == size)
1058 	    return(ptr);
1059     }
1060     if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
1061 	return(NULL);
1062     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
1063     kfree(ptr, type);
1064     return(nptr);
1065 }
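
/*
 * Illustrative krealloc() usage (sketch, hypothetical names): growing a
 * previously kmalloc()'d buffer while preserving its contents.  On failure
 * the original buffer remains valid; note that M_ZERO is not supported
 * here (see the KKASSERT above).
 *
 *	nbuf = krealloc(buf, newsize, M_TEMP, M_WAITOK | M_NULLOK);
 *	if (nbuf == NULL)
 *		return (ENOMEM);
 *	buf = nbuf;
 */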
1066 
1067 /*
1068  * Return the kmalloc limit for this type, in bytes.
1069  */
1070 long
1071 kmalloc_limit(struct malloc_type *type)
1072 {
1073     if (type->ks_limit == 0) {
1074 	crit_enter();
1075 	if (type->ks_limit == 0)
1076 	    malloc_init(type);
1077 	crit_exit();
1078     }
1079     return(type->ks_limit);
1080 }
1081 
1082 /*
1083  * Allocate a copy of the specified string.
1084  *
1085  * (MP SAFE) (MAY BLOCK)
1086  */
1087 #ifdef SLAB_DEBUG
1088 char *
1089 kstrdup_debug(const char *str, struct malloc_type *type,
1090 	      const char *file, int line)
1091 #else
1092 char *
1093 kstrdup(const char *str, struct malloc_type *type)
1094 #endif
1095 {
1096     int zlen;	/* length inclusive of terminating NUL */
1097     char *nstr;
1098 
1099     if (str == NULL)
1100 	return(NULL);
1101     zlen = strlen(str) + 1;
1102     nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1103     bcopy(str, nstr, zlen);
1104     return(nstr);
1105 }
1106 
1107 #ifdef SLAB_DEBUG
1108 char *
1109 kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
1110 	      const char *file, int line)
1111 #else
1112 char *
1113 kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
1114 #endif
1115 {
1116     int zlen;	/* length inclusive of terminating NUL */
1117     char *nstr;
1118 
1119     if (str == NULL)
1120 	return(NULL);
1121     zlen = strnlen(str, maxlen) + 1;
1122     nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1123     bcopy(str, nstr, zlen);
1124     nstr[zlen - 1] = '\0';
1125     return(nstr);
1126 }
1127 
1128 /*
1129  * Notify our cpu that a remote cpu has freed some chunks in a zone that
1130  * we own.  RCount will be bumped so the memory should be good, but validate
1131  * that it really is.
1132  */
1133 static void
1134 kfree_remote(void *ptr)
1135 {
1136     SLGlobalData *slgd;
1137     SLZone *z;
1138     int nfree;
1139     int *kup;
1140 
1141     slgd = &mycpu->gd_slab;
1142     z = ptr;
1143     kup = btokup(z);
1144     KKASSERT(*kup == -((int)mycpuid + 1));
1145     KKASSERT(z->z_RCount > 0);
1146     atomic_subtract_int(&z->z_RCount, 1);
1147 
1148     logmemory(free_rem_beg, z, NULL, 0L, 0);
1149     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1150     KKASSERT(z->z_Cpu  == mycpu->gd_cpuid);
1151     nfree = z->z_NFree;
1152 
1153     /*
1154      * Indicate that we will no longer be off of the ZoneAry by
1155      * clearing RSignal.
1156      */
1157     if (z->z_RChunks)
1158 	z->z_RSignal = 0;
1159 
1160     /*
1161      * Atomically extract the bchunks list and then process it back
1162      * into the lchunks list.  We want to append our bchunks to the
1163      * lchunks list and not prepend since we likely do not have
1164      * cache mastership of the related data (not that it helps since
1165      * we are using c_Next).
1166      */
1167     clean_zone_rchunks(z);
1168     if (z->z_NFree && nfree == 0) {
1169 	TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1170     }
1171 
1172     check_zone_free(slgd, z);
1173     logmemory(free_rem_end, z, NULL, 0L, 0);
1174 }
1175 
1176 /*
1177  * free (SLAB ALLOCATOR)
1178  *
1179  * Free a memory block previously allocated by malloc.
1180  *
1181  * Note: We do not attempt to update ks_loosememuse as MP races could
1182  * prevent us from checking memory limits in malloc.   YYY we may
1183  * consider updating ks_cpu.loosememuse.
1184  *
1185  * MPSAFE
1186  */
1187 void
1188 kfree(void *ptr, struct malloc_type *type)
1189 {
1190     SLZone *z;
1191     SLChunk *chunk;
1192     SLGlobalData *slgd;
1193     struct globaldata *gd;
1194     int *kup;
1195     unsigned long size;
1196     SLChunk *bchunk;
1197     int rsignal;
1198 
1199     logmemory_quick(free_beg);
1200     gd = mycpu;
1201     slgd = &gd->gd_slab;
1202 
1203     if (ptr == NULL)
1204 	panic("trying to free NULL pointer");
1205 
1206     /*
1207      * Handle special 0-byte allocations
1208      */
1209     if (ptr == ZERO_LENGTH_PTR) {
1210 	logmemory(free_zero, ptr, type, -1UL, 0);
1211 	logmemory_quick(free_end);
1212 	return;
1213     }
1214 
1215     /*
1216      * Panic on bad malloc type
1217      */
1218     if (type->ks_magic != M_MAGIC)
1219 	panic("free: malloc type lacks magic");
1220 
1221     /*
1222      * Handle oversized allocations.  XXX we really should require that a
1223      * size be passed to free() instead of this nonsense.
1224      *
1225      * This code is never called via an ipi.
1226      */
1227     kup = btokup(ptr);
1228     if (*kup > 0) {
1229 	size = *kup << PAGE_SHIFT;
1230 	*kup = 0;
1231 #ifdef INVARIANTS
1232 	KKASSERT(sizeof(weirdary) <= size);
1233 	bcopy(weirdary, ptr, sizeof(weirdary));
1234 #endif
1235 	/*
1236 	 * NOTE: For oversized allocations we do not record the
1237 	 *	     originating cpu.  It gets freed on the cpu calling
1238 	 *	     kfree().  The statistics are in aggregate.
1239 	 *
1240 	 * note: XXX we have still inherited the interrupts-can't-block
1241 	 * assumption.  An interrupt thread does not bump
1242 	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
1243 	 * primarily until we can fix softupdate's assumptions about free().
1244 	 */
1245 	crit_enter();
1246 	--type->ks_use[gd->gd_cpuid].inuse;
1247 	type->ks_use[gd->gd_cpuid].memuse -= size;
1248 	if (mycpu->gd_intr_nesting_level ||
1249 	    (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
1250 	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
1251 	    z = (SLZone *)ptr;
1252 	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
1253 	    z->z_ChunkSize = size;
1254 
1255 	    TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
1256 	    crit_exit();
1257 	} else {
1258 	    crit_exit();
1259 	    logmemory(free_ovsz, ptr, type, size, 0);
1260 	    kmem_slab_free(ptr, size);	/* may block */
1261 	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
1262 	}
1263 	logmemory_quick(free_end);
1264 	return;
1265     }
1266 
1267     /*
1268      * Zone case.  Figure out the zone based on the fact that it is
1269      * ZoneSize aligned.
1270      */
1271     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1272     kup = btokup(z);
1273     KKASSERT(*kup < 0);
1274     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1275 
1276     /*
1277      * If we do not own the zone then use atomic ops to free to the
1278      * remote cpu linked list and notify the target zone using a
1279      * passive message.
1280      *
1281      * The target zone cannot be deallocated while we own a chunk of it,
1282      * so the zone header's storage is stable until the very moment
1283      * we adjust z_RChunks.  After that we cannot safely dereference (z).
1284      *
1285      * (no critical section needed)
1286      */
1287     if (z->z_CpuGd != gd) {
1288 	/*
1289 	 * Making these adjustments now allows us to avoid passing (type)
1290 	 * to the remote cpu.  Note that inuse/memuse is being
1291 	 * adjusted on OUR cpu, not the zone cpu, but it should all still
1292 	 * sum up properly and cancel out.
1293 	 */
1294 	crit_enter();
1295 	--type->ks_use[gd->gd_cpuid].inuse;
1296 	type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
1297 	crit_exit();
1298 
1299 	/*
1300 	 * WARNING! This code competes with other cpus.  Once we
1301 	 *	    successfully link the chunk to RChunks the remote
1302 	 *	    cpu can rip z's storage out from under us.
1303 	 *
1304 	 *	    Bumping RCount prevents z's storage from getting
1305 	 *	    ripped out.
1306 	 */
1307 	rsignal = z->z_RSignal;
1308 	cpu_lfence();
1309 	if (rsignal)
1310 		atomic_add_int(&z->z_RCount, 1);
1311 
1312 	chunk = ptr;
1313 	for (;;) {
1314 	    bchunk = z->z_RChunks;
1315 	    cpu_ccfence();
1316 	    chunk->c_Next = bchunk;
1317 	    cpu_sfence();
1318 
1319 	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
1320 		break;
1321 	}
1322 
1323 	/*
1324 	 * We have to signal the remote cpu if our actions will cause
1325 	 * the remote zone to be placed back on ZoneAry so it can
1326 	 * move the zone back on.
1327 	 *
1328 	 * We only need to deal with NULL->non-NULL RChunk transitions
1329 	 * and only if z_RSignal is set.  We interlock by reading rsignal
1330 	 * before adding our chunk to RChunks.  This should result in
1331 	 * virtually no IPI traffic.
1332 	 *
1333 	 * We can use a passive IPI to reduce overhead even further.
1334 	 */
1335 	if (bchunk == NULL && rsignal) {
1336 	    logmemory(free_request, ptr, type,
1337 		      (unsigned long)z->z_ChunkSize, 0);
1338 	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
1339 	    /* z can get ripped out from under us from this point on */
1340 	} else if (rsignal) {
1341 	    atomic_subtract_int(&z->z_RCount, 1);
1342 	    /* z can get ripped out from under us from this point on */
1343 	}
1344 	logmemory_quick(free_end);
1345 	return;
1346     }
1347 
1348     /*
1349      * kfree locally
1350      */
1351     logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);
1352 
1353     crit_enter();
1354     chunk = ptr;
1355     chunk_mark_free(z, chunk);
1356 
1357     /*
1358      * Put weird data into the memory to detect modifications after freeing,
1359      * illegal pointer use after freeing (we should fault on the odd address),
1360      * and so forth.  XXX needs more work, see the old malloc code.
1361      */
1362 #ifdef INVARIANTS
1363     if (z->z_ChunkSize < sizeof(weirdary))
1364 	bcopy(weirdary, chunk, z->z_ChunkSize);
1365     else
1366 	bcopy(weirdary, chunk, sizeof(weirdary));
1367 #endif
1368 
1369     /*
1370      * Add this free non-zero'd chunk to a linked list for reuse.  Add
1371      * to the front of the linked list so it is more likely to be
1372      * reallocated, since it is already in our L1 cache.
1373      */
1374 #ifdef INVARIANTS
1375     if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
1376 	panic("BADFREE %p", chunk);
1377 #endif
1378     chunk->c_Next = z->z_LChunks;
1379     z->z_LChunks = chunk;
1380     if (chunk->c_Next == NULL)
1381 	z->z_LChunksp = &chunk->c_Next;
1382 
1383 #ifdef INVARIANTS
1384     if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
1385 	panic("BADFREE2");
1386 #endif
1387 
1388     /*
1389      * Bump the number of free chunks.  If it becomes non-zero the zone
1390      * must be added back onto the appropriate list.  A fully allocated
1391      * zone that sees its first free is considered 'mature' and is placed
1392      * at the head, giving the system time to potentially free the remaining
1393      * entries even while other allocations are going on and making the zone
1394      * freeable.
1395      */
1396     if (z->z_NFree++ == 0) {
1397 	if (SlabFreeToTail)
1398 	    TAILQ_INSERT_TAIL(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1399 	else
1400 	    TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1401     }
1402 
1403     --type->ks_use[z->z_Cpu].inuse;
1404     type->ks_use[z->z_Cpu].memuse -= z->z_ChunkSize;
1405 
1406     check_zone_free(slgd, z);
1407     logmemory_quick(free_end);
1408     crit_exit();
1409 }
1410 
1411 /*
1412  * Clean up slabs which are hanging around due to RChunks or which are wholly
1413  * free and can be moved to the free list if not moved by other means.
1414  *
1415  * Called once every 10 seconds on all cpus.
1416  */
1417 void
1418 slab_cleanup(void)
1419 {
1420     SLGlobalData *slgd = &mycpu->gd_slab;
1421     SLZone *z;
1422     int i;
1423 
1424     crit_enter();
1425     for (i = 0; i < NZONES; ++i) {
1426 	if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
1427 		continue;
1428 
1429 	/*
1430 	 * Scan zones.
1431 	 */
1432 	while (z) {
1433 	    /*
1434 	     * Shift all RChunks to the end of the LChunks list.  This is
1435 	     * an O(1) operation.
1436 	     *
1437 	     * Then free the zone if possible.
1438 	     */
1439 	    clean_zone_rchunks(z);
1440 	    z = check_zone_free(slgd, z);
1441 	}
1442     }
1443     crit_exit();
1444 }
1445 
1446 #if defined(INVARIANTS)
1447 
1448 /*
1449  * Helper routines for sanity checks
1450  */
1451 static void
1452 chunk_mark_allocated(SLZone *z, void *chunk)
1453 {
1454     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1455     uint32_t *bitptr;
1456 
1457     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1458     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1459 	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
1460     bitptr = &z->z_Bitmap[bitdex >> 5];
1461     bitdex &= 31;
1462     KASSERT((*bitptr & (1 << bitdex)) == 0,
1463 	    ("memory chunk %p is already allocated!", chunk));
1464     *bitptr |= 1 << bitdex;
1465 }
1466 
1467 static void
1468 chunk_mark_free(SLZone *z, void *chunk)
1469 {
1470     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1471     uint32_t *bitptr;
1472 
1473     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1474     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1475 	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1476     bitptr = &z->z_Bitmap[bitdex >> 5];
1477     bitdex &= 31;
1478     KASSERT((*bitptr & (1 << bitdex)) != 0,
1479 	    ("memory chunk %p is already free!", chunk));
1480     *bitptr &= ~(1 << bitdex);
1481 }
1482 
1483 #endif
1484 
1485 /*
1486  * kmem_slab_alloc()
1487  *
1488  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1489  *	specified alignment.  M_* flags are expected in the flags field.
1490  *
1491  *	Alignment must be a multiple of PAGE_SIZE.
1492  *
1493  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1494  *	but when we move zalloc() over to use this function as its backend
1495  *	we will have to switch to kreserve/krelease and call reserve(0)
1496  *	after the new space is made available.
1497  *
1498  *	Interrupt code which has preempted other code is not allowed to
1499  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1500  *	non-preemptively or blocks and then runs non-preemptively, then
1501  *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
1502  */
1503 static void *
1504 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1505 {
1506     vm_size_t i;
1507     vm_offset_t addr;
1508     int count, vmflags, base_vmflags;
1509     vm_page_t mbase = NULL;
1510     vm_page_t m;
1511     thread_t td;
1512 
1513     size = round_page(size);
1514     addr = vm_map_min(&kernel_map);
1515 
1516     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1517     crit_enter();
1518     vm_map_lock(&kernel_map);
1519     if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
1520 	vm_map_unlock(&kernel_map);
1521 	if ((flags & M_NULLOK) == 0)
1522 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1523 	vm_map_entry_release(count);
1524 	crit_exit();
1525 	return(NULL);
1526     }
1527 
1528     /*
1529      * kernel_object maps 1:1 to kernel_map.
1530      */
1531     vm_object_hold(&kernel_object);
1532     vm_object_reference_locked(&kernel_object);
1533     vm_map_insert(&kernel_map, &count,
1534 		  &kernel_object, NULL,
1535 		  addr, addr, addr + size,
1536 		  VM_MAPTYPE_NORMAL,
1537 		  VM_SUBSYS_KMALLOC,
1538 		  VM_PROT_ALL, VM_PROT_ALL, 0);
1539     vm_object_drop(&kernel_object);
1540     vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1541     vm_map_unlock(&kernel_map);
1542 
1543     td = curthread;
1544 
1545     base_vmflags = 0;
1546     if (flags & M_ZERO)
1547         base_vmflags |= VM_ALLOC_ZERO;
1548     if (flags & M_USE_RESERVE)
1549 	base_vmflags |= VM_ALLOC_SYSTEM;
1550     if (flags & M_USE_INTERRUPT_RESERVE)
1551         base_vmflags |= VM_ALLOC_INTERRUPT;
1552     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
1553 	panic("kmem_slab_alloc: bad flags %08x (%p)",
1554 	      flags, ((int **)&size)[-1]);
1555     }
1556 
1557     /*
1558      * Allocate the pages.  Do not map them yet.  VM_ALLOC_NORMAL can only
1559      * be set if we are not preempting.
1560      *
1561      * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1562      * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1563      * implied in this case), though I'm not sure if we really need to
1564      * do that.
1565      */
1566     vmflags = base_vmflags;
1567     if (flags & M_WAITOK) {
1568 	if (td->td_preempted)
1569 	    vmflags |= VM_ALLOC_SYSTEM;
1570 	else
1571 	    vmflags |= VM_ALLOC_NORMAL;
1572     }
1573 
1574     vm_object_hold(&kernel_object);
1575     for (i = 0; i < size; i += PAGE_SIZE) {
1576 	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1577 	if (i == 0)
1578 		mbase = m;
1579 
1580 	/*
1581 	 * If the allocation failed we either return NULL or we retry.
1582 	 *
1583 	 * If M_WAITOK is specified we wait for more memory and retry.
1584 	 * If M_WAITOK is specified from a preemption we yield instead of
1585 	 * wait.  Livelock will not occur because the interrupt thread
1586 	 * will not be preempting anyone the second time around after the
1587 	 * yield.
1588 	 */
1589 	if (m == NULL) {
1590 	    if (flags & M_WAITOK) {
1591 		if (td->td_preempted) {
1592 		    lwkt_switch();
1593 		} else {
1594 		    vm_wait(0);
1595 		}
1596 		i -= PAGE_SIZE;	/* retry */
1597 		continue;
1598 	    }
1599 	    break;
1600 	}
1601     }
1602 
1603     /*
1604      * Check and deal with an allocation failure
1605      */
1606     if (i != size) {
1607 	while (i != 0) {
1608 	    i -= PAGE_SIZE;
1609 	    m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1610 	    /* page should already be busy */
1611 	    vm_page_free(m);
1612 	}
1613 	vm_map_lock(&kernel_map);
1614 	vm_map_delete(&kernel_map, addr, addr + size, &count);
1615 	vm_map_unlock(&kernel_map);
1616 	vm_object_drop(&kernel_object);
1617 
1618 	vm_map_entry_release(count);
1619 	crit_exit();
1620 	return(NULL);
1621     }
1622 
1623     /*
1624      * Success!
1625      *
1626      * NOTE: The VM pages are still busied.  mbase points to the first one
1627      *	     but we have to iterate via vm_page_next()
1628      */
1629     vm_object_drop(&kernel_object);
1630     crit_exit();
1631 
1632     /*
1633      * Enter the pages into the pmap and deal with M_ZERO.
1634      */
1635     m = mbase;
1636     i = 0;
1637 
1638     while (i < size) {
1639 	/*
1640 	 * page should already be busy
1641 	 */
1642 	m->valid = VM_PAGE_BITS_ALL;
1643 	vm_page_wire(m);
1644 	pmap_enter(&kernel_pmap, addr + i, m,
1645 		   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
1646 	if (flags & M_ZERO)
1647 		pagezero((char *)addr + i);
1648 	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
1649 	vm_page_flag_set(m, PG_REFERENCED);
1650 	vm_page_wakeup(m);
1651 
1652 	i += PAGE_SIZE;
1653 	vm_object_hold(&kernel_object);
1654 	m = vm_page_next(m);
1655 	vm_object_drop(&kernel_object);
1656     }
1657     smp_invltlb();
1658     vm_map_entry_release(count);
1659     atomic_add_long(&SlabsAllocated, 1);
1660     return((void *)addr);
1661 }
1662 
1663 /*
1664  * kmem_slab_free()
1665  */
1666 static void
1667 kmem_slab_free(void *ptr, vm_size_t size)
1668 {
1669     crit_enter();
1670     vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1671     atomic_add_long(&SlabsFreed, 1);
1672     crit_exit();
1673 }
1674 
1675 void *
1676 kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
1677     int flags)
1678 {
1679 #if (__VM_CACHELINE_SIZE == 32)
1680 #define CAN_CACHEALIGN(sz)	((sz) >= 256)
1681 #elif (__VM_CACHELINE_SIZE == 64)
1682 #define CAN_CACHEALIGN(sz)	((sz) >= 512)
1683 #elif (__VM_CACHELINE_SIZE == 128)
1684 #define CAN_CACHEALIGN(sz)	((sz) >= 1024)
1685 #else
1686 #error "unsupported cacheline size"
1687 #endif
1688 
1689 	void *ret;
1690 
1691 	if (size_alloc < __VM_CACHELINE_SIZE)
1692 		size_alloc = __VM_CACHELINE_SIZE;
1693 	else if (!CAN_CACHEALIGN(size_alloc))
1694 		flags |= M_POWEROF2;
1695 
1696 	ret = kmalloc(size_alloc, type, flags);
1697 	KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
1698 	    ("%p(%lu) not cacheline %d aligned",
1699 	     ret, size_alloc, __VM_CACHELINE_SIZE));
1700 	return ret;
1701 
1702 #undef CAN_CACHEALIGN
1703 }
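
/*
 * Illustrative kmalloc_cachealign() usage (sketch, hypothetical names):
 * per-cpu statistics structures are a typical consumer, since cacheline
 * aligned storage avoids false sharing:
 *
 *	stats = kmalloc_cachealign(sizeof(*stats), M_DEVBUF, M_WAITOK | M_ZERO);
 */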
1704