xref: /dragonfly/sys/kern/kern_slaballoc.c (revision 1aa0974c)
1 /*
2  * (MPSAFE)
3  *
4  * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
5  *
6  * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
7  *
8  * This code is derived from software contributed to The DragonFly Project
9  * by Matthew Dillon <dillon@backplane.com>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in
19  *    the documentation and/or other materials provided with the
20  *    distribution.
21  * 3. Neither the name of The DragonFly Project nor the names of its
22  *    contributors may be used to endorse or promote products derived
23  *    from this software without specific, prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * This module implements a slab allocator drop-in replacement for the
39  * kernel malloc().
40  *
41  * A slab allocator reserves a ZONE for each chunk size, then lays the
42  * chunks out in an array within the zone.  Allocation and deallocation
43  * is nearly instantanious, and fragmentation/overhead losses are limited
44  * to a fixed worst-case amount.
45  *
46  * The downside of this slab implementation is the zone size
47  * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
48  * In a kernel implementation all this memory will be physical so
49  * the zone size is adjusted downward on machines with less physical
50  * memory.  The upside is that overhead is bounded... this is the *worst*
51  * case overhead.
52  *
53  * Slab management is done on a per-cpu basis and no locking or mutexes
54  * are required, only a critical section.  When one cpu frees memory
55  * belonging to another cpu's slab manager, an asynchronous IPI message
56  * will be queued to execute the operation.   In addition, both the
57  * high level slab allocator and the low level zone allocator optimize
58  * M_ZERO requests, and the slab allocator does not have to pre-initialize
59  * the linked list of chunks.
60  *
61  * XXX Balancing is needed between cpus.  Balance will be handled through
62  * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63  *
64  * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65  * the new zone should be restricted to M_USE_RESERVE requests only.
66  *
67  *	Alloc Size	Chunking        Number of zones
68  *	0-127		8		16
69  *	128-255		16		8
70  *	256-511		32		8
71  *	512-1023	64		8
72  *	1024-2047	128		8
73  *	2048-4095	256		8
74  *	4096-8191	512		8
75  *	8192-16383	1024		8
76  *	16384-32767	2048		8
77  *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
78  *
79  *	Allocations >= ZoneLimit go directly to kmem.
80  *
81  * Alignment properties:
82  * - All power-of-2 sized allocations are power-of-2 aligned.
83  * - Allocations with M_POWEROF2 are power-of-2 aligned to the nearest
84  *   power-of-2 round-up of 'size'.
85  * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
86  *   above table 'Chunking' column).
87  *
88  *			API REQUIREMENTS AND SIDE EFFECTS
89  *
90  *    To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
91  *    have remained compatible with the following API requirements:
92  *
93  *    + malloc(0) is allowed and returns non-NULL (ahc driver)
94  *    + ability to allocate arbitrarily large chunks of memory
95  */
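/*
 * Illustrative examples derived from the chunking table above (the exact
 * zone count depends on the ZoneSize computed in kmeminit(); the sizes
 * shown assume the typical configuration):
 *
 *	kmalloc(100, ...)    falls in the 0-127 row, is rounded up to a
 *			     104 byte chunk and is 8-byte aligned.
 *	kmalloc(3000, ...)   falls in the 2048-4095 row, is rounded up to
 *			     a 3072 byte chunk and is 256-byte aligned.
 *	kmalloc(100000, ...) is >= ZoneLimit and is handed directly to the
 *			     kmem backend in page-rounded form.
 */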
96 
97 #include "opt_vm.h"
98 
99 #include <sys/param.h>
100 #include <sys/systm.h>
101 #include <sys/kernel.h>
102 #include <sys/slaballoc.h>
103 #include <sys/mbuf.h>
104 #include <sys/vmmeter.h>
105 #include <sys/lock.h>
106 #include <sys/thread.h>
107 #include <sys/globaldata.h>
108 #include <sys/sysctl.h>
109 #include <sys/ktr.h>
110 
111 #include <vm/vm.h>
112 #include <vm/vm_param.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_extern.h>
115 #include <vm/vm_object.h>
116 #include <vm/pmap.h>
117 #include <vm/vm_map.h>
118 #include <vm/vm_page.h>
119 #include <vm/vm_pageout.h>
120 
121 #include <machine/cpu.h>
122 
123 #include <sys/thread2.h>
124 #include <vm/vm_page2.h>
125 
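/*
 * btokup() returns a pointer to the per-page accounting integer for the
 * kernel page containing the supplied address.  The slab code stores the
 * page count of an oversized allocation there (positive), -(cpu + 1) for
 * a slab zone owned by that cpu (negative), or 0 when the page is not in
 * use by the allocator; see the *kup assignments in kmalloc() and kfree().
 */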
126 #define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)
127 
128 #define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
129 #define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags
130 
131 #if !defined(KTR_MEMORY)
132 #define KTR_MEMORY	KTR_ALL
133 #endif
134 KTR_INFO_MASTER(memory);
135 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
136 KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
137 KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
138 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
139 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
140 KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
141 KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
142 KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
143 KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
144 KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
145 KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");
146 
147 #define logmemory(name, ptr, type, size, flags)				\
148 	KTR_LOG(memory_ ## name, ptr, type, size, flags)
149 #define logmemory_quick(name)						\
150 	KTR_LOG(memory_ ## name)
151 
152 /*
153  * Fixed globals (not per-cpu)
154  */
155 static int ZoneSize;
156 static int ZoneLimit;
157 static int ZonePageCount;
158 static uintptr_t ZoneMask;
159 static int ZoneBigAlloc;		/* in KB */
160 static int ZoneGenAlloc;		/* in KB */
161 struct malloc_type *kmemstatistics;	/* exported to vmstat */
162 static int32_t weirdary[16];
163 
164 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
165 static void kmem_slab_free(void *ptr, vm_size_t bytes);
166 
167 #if defined(INVARIANTS)
168 static void chunk_mark_allocated(SLZone *z, void *chunk);
169 static void chunk_mark_free(SLZone *z, void *chunk);
170 #else
171 #define chunk_mark_allocated(z, chunk)
172 #define chunk_mark_free(z, chunk)
173 #endif
174 
175 /*
176  * Misc constants.  Note that allocations that are exact multiples of
177  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
178  */
179 #define ZONE_RELS_THRESH	32		/* threshold number of zones */
180 
181 /*
182  * The WEIRD_ADDR is used as known text to copy into free objects to
183  * try to create deterministic failure cases if the data is accessed after
184  * free.
185  */
186 #define WEIRD_ADDR      0xdeadc0de
187 #define MAX_COPY        sizeof(weirdary)
188 #define ZERO_LENGTH_PTR	((void *)-8)
189 
190 /*
191  * Misc global malloc buckets
192  */
193 
194 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
195 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
196 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
197 
198 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
199 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
200 
201 /*
202  * Initialize the slab memory allocator.  We have to choose a zone size based
203  * on available physical memory.  We choose a zone size which is approximately
204  * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
205  * 128K.  The zone size is limited to the bounds set in slaballoc.h
206  * (typically 32K min, 128K max).
207  */
208 static void kmeminit(void *dummy);
209 
210 char *ZeroPage;
211 
212 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
213 
214 #ifdef INVARIANTS
215 /*
216  * If enabled any memory allocated without M_ZERO is initialized to -1.
217  */
218 static int  use_malloc_pattern;
219 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
220     &use_malloc_pattern, 0,
221     "Initialize memory to -1 if M_ZERO not specified");
222 #endif
223 
224 static int ZoneRelsThresh = ZONE_RELS_THRESH;
225 SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
226 SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
227 SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
228 static long SlabsAllocated;
229 static long SlabsFreed;
230 SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD, &SlabsAllocated, 0, "");
231 SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD, &SlabsFreed, 0, "");
232 
233 /*
234  * Returns the kernel memory size limit for the purposes of initializing
235  * various subsystem caches.  The smaller of available memory and the KVM
236  * memory space is returned.
237  *
238  * The size in megabytes is returned.
239  */
240 size_t
241 kmem_lim_size(void)
242 {
243     size_t limsize;
244 
245     limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
246     if (limsize > KvaSize)
247 	limsize = KvaSize;
248     return (limsize / (1024 * 1024));
249 }
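/*
 * For example, a machine with 8G of ram and a larger KVM space makes
 * kmem_lim_size() return 8192 (MB); kmeminit() and malloc_init() below
 * derive the zone size and the per-type limits from that figure.
 */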
250 
251 static void
252 kmeminit(void *dummy)
253 {
254     size_t limsize;
255     int usesize;
256     int i;
257 
258     limsize = kmem_lim_size();
259     usesize = (int)(limsize * 1024);	/* convert to KB */
260 
261     /*
262      * If the machine has a large KVM space and more than 8G of ram,
263      * double the zone release threshold to reduce SMP invalidations.
264      * If more than 16G of ram, do it again.
265      *
266      * The BIOS eats a little ram so add some slop.  We want 8G worth of
267      * memory sticks to trigger the first adjustment.
268      */
269     if (ZoneRelsThresh == ZONE_RELS_THRESH) {
270 	    if (limsize >= 7 * 1024)
271 		    ZoneRelsThresh *= 2;
272 	    if (limsize >= 15 * 1024)
273 		    ZoneRelsThresh *= 2;
274     }
275 
276     /*
277      * Calculate the zone size.  This typically calculates to
278      * ZALLOC_MAX_ZONE_SIZE
279      */
280     ZoneSize = ZALLOC_MIN_ZONE_SIZE;
281     while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
282 	ZoneSize <<= 1;
283     ZoneLimit = ZoneSize / 4;
284     if (ZoneLimit > ZALLOC_ZONE_LIMIT)
285 	ZoneLimit = ZALLOC_ZONE_LIMIT;
286     ZoneMask = ~(uintptr_t)(ZoneSize - 1);
287     ZonePageCount = ZoneSize / PAGE_SIZE;
288 
289     for (i = 0; i < NELEM(weirdary); ++i)
290 	weirdary[i] = WEIRD_ADDR;
291 
292     ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);
293 
294     if (bootverbose)
295 	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
296 }
297 
298 /*
299  * Initialize a malloc type tracking structure.
300  */
301 void
302 malloc_init(void *data)
303 {
304     struct malloc_type *type = data;
305     size_t limsize;
306 
307     if (type->ks_magic != M_MAGIC)
308 	panic("malloc type lacks magic");
309 
310     if (type->ks_limit != 0)
311 	return;
312 
313     if (vmstats.v_page_count == 0)
314 	panic("malloc_init not allowed before vm init");
315 
316     limsize = kmem_lim_size() * (1024 * 1024);
317     type->ks_limit = limsize / 10;
318 
319     type->ks_next = kmemstatistics;
320     kmemstatistics = type;
321 }
322 
323 void
324 malloc_uninit(void *data)
325 {
326     struct malloc_type *type = data;
327     struct malloc_type *t;
328 #ifdef INVARIANTS
329     int i;
330     long ttl;
331 #endif
332 
333     if (type->ks_magic != M_MAGIC)
334 	panic("malloc type lacks magic");
335 
336     if (vmstats.v_page_count == 0)
337 	panic("malloc_uninit not allowed before vm init");
338 
339     if (type->ks_limit == 0)
340 	panic("malloc_uninit on uninitialized type");
341 
342     /* Make sure that all pending kfree()s are finished. */
343     lwkt_synchronize_ipiqs("muninit");
344 
345 #ifdef INVARIANTS
346     /*
347      * memuse is only correct in aggregation.  Due to memory being allocated
348  * on one cpu and freed on another, individual array entries may be
349      * negative or positive (canceling each other out).
350      */
351     for (i = ttl = 0; i < ncpus; ++i)
352 	ttl += type->ks_memuse[i];
353     if (ttl) {
354 	kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
355 	    ttl, type->ks_shortdesc);
356     }
357 #endif
358     if (type == kmemstatistics) {
359 	kmemstatistics = type->ks_next;
360     } else {
361 	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
362 	    if (t->ks_next == type) {
363 		t->ks_next = type->ks_next;
364 		break;
365 	    }
366 	}
367     }
368     type->ks_next = NULL;
369     type->ks_limit = 0;
370 }
371 
372 /*
373  * Increase the kmalloc pool limit for the specified pool.  No changes
374  * are made if the pool would shrink.
375  */
376 void
377 kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
378 {
379     if (type->ks_limit == 0)
380 	malloc_init(type);
381     if (bytes == 0)
382 	bytes = KvaSize;
383     if (type->ks_limit < bytes)
384 	type->ks_limit = bytes;
385 }
386 
387 /*
388  * Dynamically create a malloc pool.  This function is a NOP if *typep is
389  * already non-NULL.
390  */
391 void
392 kmalloc_create(struct malloc_type **typep, const char *descr)
393 {
394 	struct malloc_type *type;
395 
396 	if (*typep == NULL) {
397 		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
398 		type->ks_magic = M_MAGIC;
399 		type->ks_shortdesc = descr;
400 		malloc_init(type);
401 		*typep = type;
402 	}
403 }
404 
405 /*
406  * Destroy a dynamically created malloc pool.  This function is a NOP if
407  * the pool has already been destroyed.
408  */
409 void
410 kmalloc_destroy(struct malloc_type **typep)
411 {
412 	if (*typep != NULL) {
413 		malloc_uninit(*typep);
414 		kfree(*typep, M_TEMP);
415 		*typep = NULL;
416 	}
417 }
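/*
 * A minimal usage sketch for a dynamically created pool (the pool name
 * and variable below are hypothetical, for illustration only):
 *
 *	static struct malloc_type *foo_pool;
 *
 *	kmalloc_create(&foo_pool, "foo data");
 *	p = kmalloc(sizeof(*p), foo_pool, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, foo_pool);
 *	kmalloc_destroy(&foo_pool);
 */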
418 
419 /*
420  * Calculate the zone index for the allocation request size and set the
421  * allocation request size to that particular zone's chunk size.
422  */
423 static __inline int
424 zoneindex(unsigned long *bytes, unsigned long *align)
425 {
426     unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
427     if (n < 128) {
428 	*bytes = n = (n + 7) & ~7;
429 	*align = 8;
430 	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
431     }
432     if (n < 256) {
433 	*bytes = n = (n + 15) & ~15;
434 	*align = 16;
435 	return(n / 16 + 7);
436     }
437     if (n < 8192) {
438 	if (n < 512) {
439 	    *bytes = n = (n + 31) & ~31;
440 	    *align = 32;
441 	    return(n / 32 + 15);
442 	}
443 	if (n < 1024) {
444 	    *bytes = n = (n + 63) & ~63;
445 	    *align = 64;
446 	    return(n / 64 + 23);
447 	}
448 	if (n < 2048) {
449 	    *bytes = n = (n + 127) & ~127;
450 	    *align = 128;
451 	    return(n / 128 + 31);
452 	}
453 	if (n < 4096) {
454 	    *bytes = n = (n + 255) & ~255;
455 	    *align = 256;
456 	    return(n / 256 + 39);
457 	}
458 	*bytes = n = (n + 511) & ~511;
459 	*align = 512;
460 	return(n / 512 + 47);
461     }
462 #if ZALLOC_ZONE_LIMIT > 8192
463     if (n < 16384) {
464 	*bytes = n = (n + 1023) & ~1023;
465 	*align = 1024;
466 	return(n / 1024 + 55);
467     }
468 #endif
469 #if ZALLOC_ZONE_LIMIT > 16384
470     if (n < 32768) {
471 	*bytes = n = (n + 2047) & ~2047;
472 	*align = 2048;
473 	return(n / 2048 + 63);
474     }
475 #endif
476     panic("Unexpected byte count %d", n);
477     return(0);
478 }
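/*
 * Worked examples of the index math above: a request of 100 bytes is
 * rounded to 104 and yields index 104 / 8 - 1 = 12; a request of 300
 * bytes is rounded to 320 and yields index 320 / 32 + 15 = 25; a request
 * of 5000 bytes is rounded to 5120 and yields index 5120 / 512 + 47 = 57.
 */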
479 
480 static __inline
481 void
482 clean_zone_rchunks(SLZone *z)
483 {
484     SLChunk *bchunk;
485 
486     while ((bchunk = z->z_RChunks) != NULL) {
487 	cpu_ccfence();
488 	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
489 	    *z->z_LChunksp = bchunk;
490 	    while (bchunk) {
491 		chunk_mark_free(z, bchunk);
492 		z->z_LChunksp = &bchunk->c_Next;
493 		bchunk = bchunk->c_Next;
494 		++z->z_NFree;
495 	    }
496 	    break;
497 	}
498 	/* retry */
499     }
500 }
501 
502 /*
503  * If the zone becomes totally free, and there are other zones we
504  * can allocate from, move this zone to the FreeZones list.  Since
505  * this code can be called from an IPI callback, do *NOT* try to mess
506  * with kernel_map here.  Hysteresis will be performed at malloc() time.
507  */
508 static __inline
509 SLZone *
510 check_zone_free(SLGlobalData *slgd, SLZone *z)
511 {
512     if (z->z_NFree == z->z_NMax &&
513 	(z->z_Next || LIST_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z) &&
514 	z->z_RCount == 0
515     ) {
516 	SLZone *znext;
517 	int *kup;
518 
519 	znext = LIST_NEXT(z, z_Entry);
520 	LIST_REMOVE(z, z_Entry);
521 
522 	z->z_Magic = -1;
523 	LIST_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
524 	++slgd->NFreeZones;
525 	kup = btokup(z);
526 	*kup = 0;
527 	z = znext;
528     } else {
529 	z = LIST_NEXT(z, z_Entry);
530     }
531     return z;
532 }
533 
534 #ifdef SLAB_DEBUG
535 /*
536  * Used to debug memory corruption issues.  Record up to (typically 32)
537  * allocation sources for this zone (for a particular chunk size).
538  */
539 
540 static void
541 slab_record_source(SLZone *z, const char *file, int line)
542 {
543     int i;
544     int b = line & (SLAB_DEBUG_ENTRIES - 1);
545 
546     i = b;
547     do {
548 	if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
549 		return;
550 	if (z->z_Sources[i].file == NULL)
551 		break;
552 	i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
553     } while (i != b);
554     z->z_Sources[i].file = file;
555     z->z_Sources[i].line = line;
556 }
557 
558 #endif
559 
560 static __inline unsigned long
561 powerof2_size(unsigned long size)
562 {
563 	int i;
564 
565 	if (size == 0 || powerof2(size))
566 		return size;
567 
568 	i = flsl(size);
569 	return (1UL << i);
570 }
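/*
 * For example, powerof2_size(100) computes flsl(100) == 7 and returns
 * 1UL << 7 == 128, while powerof2_size(0) and powerof2_size(128) are
 * returned unchanged by the early test above.
 */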
571 
572 /*
573  * kmalloc()	(SLAB ALLOCATOR)
574  *
575  *	Allocate memory via the slab allocator.  If the request is too large,
576  *	or if it page-aligned beyond a certain size, we fall back to the
577  *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
578  *	&SlabMisc if you don't care.
579  *
580  *	M_RNOWAIT	- don't block.
581  *	M_NULLOK	- return NULL instead of blocking.
582  *	M_ZERO		- zero the returned memory.
583  *	M_USE_RESERVE	- allow greater drawdown of the free list
584  *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
585  *	M_POWEROF2	- roundup size to the nearest power of 2
586  *
587  * MPSAFE
588  */
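/*
 * A minimal usage sketch (buf and len are illustrative; M_TEMP is one of
 * the generic buckets defined above, any malloc type works):
 *
 *	buf = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	kfree(buf, M_TEMP);
 *
 * With M_RNOWAIT (optionally combined with M_NULLOK) the call may fail
 * and return NULL, which the caller must check for.
 */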
589 
590 #ifdef SLAB_DEBUG
591 void *
592 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
593 	      const char *file, int line)
594 #else
595 void *
596 kmalloc(unsigned long size, struct malloc_type *type, int flags)
597 #endif
598 {
599     SLZone *z;
600     SLChunk *chunk;
601     SLGlobalData *slgd;
602     struct globaldata *gd;
603     unsigned long align;
604     int zi;
605 #ifdef INVARIANTS
606     int i;
607 #endif
608 
609     logmemory_quick(malloc_beg);
610     gd = mycpu;
611     slgd = &gd->gd_slab;
612 
613     /*
614      * XXX silly to have this in the critical path.
615      */
616     if (type->ks_limit == 0) {
617 	crit_enter();
618 	malloc_init(type);
619 	crit_exit();
620     }
621     ++type->ks_calls;
622 
623     if (flags & M_POWEROF2)
624 	size = powerof2_size(size);
625 
626     /*
627      * Handle the case where the limit is reached.  Panic if we can't return
628      * NULL.  The original malloc code looped, but this tended to
629      * simply deadlock the computer.
630      *
631      * ks_loosememuse is an up-only estimate that is NOT MP-synchronized, used
632      * to determine if a more complete limit check should be done.  The
633      * actual memory use is tracked via ks_memuse[cpu].
634      */
635     while (type->ks_loosememuse >= type->ks_limit) {
636 	int i;
637 	long ttl;
638 
639 	for (i = ttl = 0; i < ncpus; ++i)
640 	    ttl += type->ks_memuse[i];
641 	type->ks_loosememuse = ttl;	/* not MP synchronized */
642 	if ((ssize_t)ttl < 0)		/* deal with occasional race */
643 		ttl = 0;
644 	if (ttl >= type->ks_limit) {
645 	    if (flags & M_NULLOK) {
646 		logmemory(malloc_end, NULL, type, size, flags);
647 		return(NULL);
648 	    }
649 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
650 	}
651     }
652 
653     /*
654      * Handle the degenerate size == 0 case.  Yes, this does happen.
655      * Return a special pointer.  This is to maintain compatibility with
656      * the original malloc implementation.  Certain devices, such as the
657      * adaptec driver, not only allocate 0 bytes, they check for NULL and
658      * also realloc() later on.  Joy.
659      */
660     if (size == 0) {
661 	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
662 	return(ZERO_LENGTH_PTR);
663     }
664 
665     /*
666      * Handle hysteresis from prior frees here in malloc().  We cannot
667      * safely manipulate the kernel_map in free() due to free() possibly
668      * being called via an IPI message or from sensitive interrupt code.
669      *
670      * NOTE: ku_pagecnt must be cleared before we free the slab or we
671      *	     might race another cpu allocating the kva and setting
672      *	     ku_pagecnt.
673      */
674     while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
675 	crit_enter();
676 	if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
677 	    int *kup;
678 
679 	    z = LIST_FIRST(&slgd->FreeZones);
680 	    LIST_REMOVE(z, z_Entry);
681 	    --slgd->NFreeZones;
682 	    kup = btokup(z);
683 	    *kup = 0;
684 	    kmem_slab_free(z, ZoneSize);	/* may block */
685 	    atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
686 	}
687 	crit_exit();
688     }
689 
690     /*
691      * XXX handle oversized frees that were queued from kfree().
692      */
693     while (LIST_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
694 	crit_enter();
695 	if ((z = LIST_FIRST(&slgd->FreeOvZones)) != NULL) {
696 	    vm_size_t tsize;
697 
698 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
699 	    LIST_REMOVE(z, z_Entry);
700 	    tsize = z->z_ChunkSize;
701 	    kmem_slab_free(z, tsize);	/* may block */
702 	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
703 	}
704 	crit_exit();
705     }
706 
707     /*
708      * Handle large allocations directly.  There should not be very many of
709      * these so performance is not a big issue.
710      *
711      * The backend allocator is pretty nasty on a SMP system.   Use the
712      * The backend allocator is pretty nasty on an SMP system.  Use the
713      * some efficiency.  XXX maybe fix mmio and the elf loader instead.
714      */
715     if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
716 	int *kup;
717 
718 	size = round_page(size);
719 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
720 	if (chunk == NULL) {
721 	    logmemory(malloc_end, NULL, type, size, flags);
722 	    return(NULL);
723 	}
724 	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
725 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
726 	flags |= M_PASSIVE_ZERO;
727 	kup = btokup(chunk);
728 	*kup = size / PAGE_SIZE;
729 	crit_enter();
730 	goto done;
731     }
732 
733     /*
734      * Attempt to allocate out of an existing zone.  First try the free list,
735      * then allocate out of unallocated space.  If we find a good zone move
736      * it to the head of the list so later allocations find it quickly
737      * (we might have thousands of zones in the list).
738      *
739      * Note: zoneindex() will panic if size is too large.
740      */
741     zi = zoneindex(&size, &align);
742     KKASSERT(zi < NZONES);
743     crit_enter();
744 
745     if ((z = LIST_FIRST(&slgd->ZoneAry[zi])) != NULL) {
746 	/*
747 	 * Locate a chunk - we have to have at least one.  If this is the
748 	 * last chunk go ahead and do the work to retrieve chunks freed
749 	 * from remote cpus, and if the zone is still empty move it off
750 	 * the ZoneAry.
751 	 */
752 	if (--z->z_NFree <= 0) {
753 	    KKASSERT(z->z_NFree == 0);
754 
755 	    /*
756 	     * WARNING! This code competes with other cpus.  It is ok
757 	     * for us to not drain RChunks here but we might as well, and
758 	     * it is ok if more accumulate after we're done.
759 	     *
760 	     * Set RSignal before pulling rchunks off, indicating that we
761 	     * will be moving ourselves off of the ZoneAry.  Remote ends will
762	     * read RSignal before putting rchunks on, thus interlocking
763 	     * their IPI signaling.
764 	     */
765 	    if (z->z_RChunks == NULL)
766 		atomic_swap_int(&z->z_RSignal, 1);
767 
768 	    clean_zone_rchunks(z);
769 
770 	    /*
771 	     * Remove from the zone list if no free chunks remain.
772 	     * Clear RSignal
773 	     */
774 	    if (z->z_NFree == 0) {
775 		LIST_REMOVE(z, z_Entry);
776 	    } else {
777 		z->z_RSignal = 0;
778 	    }
779 	}
780 
781 	/*
782 	 * Fast path, we have chunks available in z_LChunks.
783 	 */
784 	chunk = z->z_LChunks;
785 	if (chunk) {
786 		chunk_mark_allocated(z, chunk);
787 		z->z_LChunks = chunk->c_Next;
788 		if (z->z_LChunks == NULL)
789 			z->z_LChunksp = &z->z_LChunks;
790 #ifdef SLAB_DEBUG
791 		slab_record_source(z, file, line);
792 #endif
793 		goto done;
794 	}
795 
796 	/*
797 	 * No chunks are available in LChunks, so the free chunk MUST be
798 	 * in the never-before-used memory area, controlled by UIndex.
799 	 *
800 	 * The consequences are very serious if our zone got corrupted so
801 	 * we use an explicit panic rather than a KASSERT.
802 	 */
803 	if (z->z_UIndex + 1 != z->z_NMax)
804 	    ++z->z_UIndex;
805 	else
806 	    z->z_UIndex = 0;
807 
808 	if (z->z_UIndex == z->z_UEndIndex)
809 	    panic("slaballoc: corrupted zone");
810 
811 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
812 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
813 	    flags &= ~M_ZERO;
814 	    flags |= M_PASSIVE_ZERO;
815 	}
816 	chunk_mark_allocated(z, chunk);
817 #ifdef SLAB_DEBUG
818 	slab_record_source(z, file, line);
819 #endif
820 	goto done;
821     }
822 
823     /*
824      * If all zones are exhausted we need to allocate a new zone for this
825      * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
826      * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
827      * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
828      * we do not pre-zero it because we do not want to mess up the L1 cache.
829      *
830      * At least one subsystem, the tty code (see CROUND), expects power-of-2
831      * allocations to be power-of-2 aligned.  We maintain compatibility by
832      * adjusting the base offset below.
833      */
834     {
835 	int off;
836 	int *kup;
837 
838 	if ((z = LIST_FIRST(&slgd->FreeZones)) != NULL) {
839 	    LIST_REMOVE(z, z_Entry);
840 	    --slgd->NFreeZones;
841 	    bzero(z, sizeof(SLZone));
842 	    z->z_Flags |= SLZF_UNOTZEROD;
843 	} else {
844 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
845 	    if (z == NULL)
846 		goto fail;
847 	    atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
848 	}
849 
850 	/*
851 	 * How big is the base structure?
852 	 */
853 #if defined(INVARIANTS)
854 	/*
855 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
856 	 * complicated, so slightly over-estimate the bitmap size instead.
857 	 */
858 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
859 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
860 #else
861 	off = sizeof(SLZone);
862 #endif
863 
864 	/*
865 	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
866 	 * Otherwise properly align the data according to the chunk size.
867 	 */
868 	if (powerof2(size))
869 	    align = size;
870 	off = (off + align - 1) & ~(align - 1);
871 
872 	z->z_Magic = ZALLOC_SLAB_MAGIC;
873 	z->z_ZoneIndex = zi;
874 	z->z_NMax = (ZoneSize - off) / size;
875 	z->z_NFree = z->z_NMax - 1;
876 	z->z_BasePtr = (char *)z + off;
877 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
878 	z->z_ChunkSize = size;
879 	z->z_CpuGd = gd;
880 	z->z_Cpu = gd->gd_cpuid;
881 	z->z_LChunksp = &z->z_LChunks;
882 #ifdef SLAB_DEBUG
883 	bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
884 	bzero(z->z_Sources, sizeof(z->z_Sources));
885 #endif
886 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
887 	LIST_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
888 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
889 	    flags &= ~M_ZERO;	/* already zero'd */
890 	    flags |= M_PASSIVE_ZERO;
891 	}
892 	kup = btokup(z);
893 	*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
894 	chunk_mark_allocated(z, chunk);
895 #ifdef SLAB_DEBUG
896 	slab_record_source(z, file, line);
897 #endif
898 
899 	/*
900 	 * Slide the base index for initial allocations out of the next
901 	 * zone we create so we do not over-weight the lower part of the
902 	 * cpu memory caches.
903 	 */
904 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
905 				& (ZALLOC_MAX_ZONE_SIZE - 1);
906     }
907 
908 done:
909     ++type->ks_inuse[gd->gd_cpuid];
910     type->ks_memuse[gd->gd_cpuid] += size;
911     type->ks_loosememuse += size;	/* not MP synchronized */
912     crit_exit();
913 
914     if (flags & M_ZERO)
915 	bzero(chunk, size);
916 #ifdef INVARIANTS
917     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
918 	if (use_malloc_pattern) {
919 	    for (i = 0; i < size; i += sizeof(int)) {
920 		*(int *)((char *)chunk + i) = -1;
921 	    }
922 	}
923 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
924     }
925 #endif
926     logmemory(malloc_end, chunk, type, size, flags);
927     return(chunk);
928 fail:
929     crit_exit();
930     logmemory(malloc_end, NULL, type, size, flags);
931     return(NULL);
932 }
933 
934 /*
935  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
936  *
937  * Generally speaking this routine is not called very often and we do
938  * not attempt to optimize it beyond reusing the same pointer if the
939  * new size fits within the chunking of the old pointer's zone.
940  */
941 #ifdef SLAB_DEBUG
942 void *
943 krealloc_debug(void *ptr, unsigned long size,
944 	       struct malloc_type *type, int flags,
945 	       const char *file, int line)
946 #else
947 void *
948 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
949 #endif
950 {
951     unsigned long osize;
952     unsigned long align;
953     SLZone *z;
954     void *nptr;
955     int *kup;
956 
957     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
958 
959     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
960 	return(kmalloc_debug(size, type, flags, file, line));
961     if (size == 0) {
962 	kfree(ptr, type);
963 	return(NULL);
964     }
965 
966     /*
967      * Handle oversized allocations.  XXX we really should require that a
968      * size be passed to free() instead of this nonsense.
969      */
970     kup = btokup(ptr);
971     if (*kup > 0) {
972 	osize = *kup << PAGE_SHIFT;
973 	if (osize == round_page(size))
974 	    return(ptr);
975 	if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
976 	    return(NULL);
977 	bcopy(ptr, nptr, min(size, osize));
978 	kfree(ptr, type);
979 	return(nptr);
980     }
981 
982     /*
983      * Get the original allocation's zone.  If the new request winds up
984      * using the same chunk size we do not have to do anything.
985      */
986     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
987     kup = btokup(z);
988     KKASSERT(*kup < 0);
989     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
990 
991     /*
992      * Allocate memory for the new request size.  Note that zoneindex has
993      * already adjusted the request size to the appropriate chunk size, which
994      * should optimize our bcopy().  Then copy and return the new pointer.
995      *
996      * Resizing a non-power-of-2 allocation to a power-of-2 size does not
997      * necessarily align the result.
998      *
999      * We can only zoneindex (to align size to the chunk size) if the new
1000      * size is not too large.
1001      */
1002     if (size < ZoneLimit) {
1003 	zoneindex(&size, &align);
1004 	if (z->z_ChunkSize == size)
1005 	    return(ptr);
1006     }
1007     if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
1008 	return(NULL);
1009     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
1010     kfree(ptr, type);
1011     return(nptr);
1012 }
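/*
 * For example, memory obtained from kmalloc(100, ...) lives in a 104 byte
 * chunk, so krealloc()ing it to any size from 97 to 104 bytes rounds to
 * the same chunk size and simply returns the original pointer; anything
 * else allocates a new chunk, copies the data and frees the old chunk.
 */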
1013 
1014 /*
1015  * Return the kmalloc limit for this type, in bytes.
1016  */
1017 long
1018 kmalloc_limit(struct malloc_type *type)
1019 {
1020     if (type->ks_limit == 0) {
1021 	crit_enter();
1022 	if (type->ks_limit == 0)
1023 	    malloc_init(type);
1024 	crit_exit();
1025     }
1026     return(type->ks_limit);
1027 }
1028 
1029 /*
1030  * Allocate a copy of the specified string.
1031  *
1032  * (MP SAFE) (MAY BLOCK)
1033  */
1034 #ifdef SLAB_DEBUG
1035 char *
1036 kstrdup_debug(const char *str, struct malloc_type *type,
1037 	      const char *file, int line)
1038 #else
1039 char *
1040 kstrdup(const char *str, struct malloc_type *type)
1041 #endif
1042 {
1043     int zlen;	/* length inclusive of terminating NUL */
1044     char *nstr;
1045 
1046     if (str == NULL)
1047 	return(NULL);
1048     zlen = strlen(str) + 1;
1049     nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1050     bcopy(str, nstr, zlen);
1051     return(nstr);
1052 }
1053 
1054 /*
1055  * Notify our cpu that a remote cpu has freed some chunks in a zone that
1056  * we own.  RCount will be bumped so the memory should be good, but validate
1057  * that it really is.
1058  */
1059 static
1060 void
1061 kfree_remote(void *ptr)
1062 {
1063     SLGlobalData *slgd;
1064     SLZone *z;
1065     int nfree;
1066     int *kup;
1067 
1068     slgd = &mycpu->gd_slab;
1069     z = ptr;
1070     kup = btokup(z);
1071     KKASSERT(*kup == -((int)mycpuid + 1));
1072     KKASSERT(z->z_RCount > 0);
1073     atomic_subtract_int(&z->z_RCount, 1);
1074 
1075     logmemory(free_rem_beg, z, NULL, 0L, 0);
1076     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1077     KKASSERT(z->z_Cpu  == mycpu->gd_cpuid);
1078     nfree = z->z_NFree;
1079 
1080     /*
1081      * Indicate that we will no longer be off of the ZoneAry by
1082      * clearing RSignal.
1083      */
1084     if (z->z_RChunks)
1085 	z->z_RSignal = 0;
1086 
1087     /*
1088      * Atomically extract the bchunks list and then process it back
1089      * into the lchunks list.  We want to append our bchunks to the
1090      * lchunks list and not prepend since we likely do not have
1091      * cache mastership of the related data (not that it helps since
1092      * we are using c_Next).
1093      */
1094     clean_zone_rchunks(z);
1095     if (z->z_NFree && nfree == 0) {
1096 	LIST_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1097     }
1098 
1099     /*
1100      * If the zone becomes totally free, and there are other zones we
1101      * can allocate from, move this zone to the FreeZones list.  Since
1102      * this code can be called from an IPI callback, do *NOT* try to mess
1103      * with kernel_map here.  Hysteresis will be performed at malloc() time.
1104      *
1105      * Do not move the zone if there is an IPI inflight, otherwise MP
1106      * races can result in our free_remote code accessing a destroyed
1107      * zone.
1108      */
1109     if (z->z_NFree == z->z_NMax &&
1110 	(z->z_Next || LIST_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z) &&
1111 	z->z_RCount == 0
1112     ) {
1113 	int *kup;
1114 
1115 	LIST_REMOVE(z, z_Entry);
1116 	z->z_Magic = -1;
1117 	LIST_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
1118 	++slgd->NFreeZones;
1119 	kup = btokup(z);
1120 	*kup = 0;
1121     }
1122     logmemory(free_rem_end, z, NULL, 0L, 0);
1123 }
1124 
1125 /*
1126  * free (SLAB ALLOCATOR)
1127  *
1128  * Free a memory block previously allocated by malloc.  Note that we do not
1129  * attempt to update ks_loosememuse as MP races could prevent us from
1130  * checking memory limits in malloc.
1131  *
1132  * MPSAFE
1133  */
1134 void
1135 kfree(void *ptr, struct malloc_type *type)
1136 {
1137     SLZone *z;
1138     SLChunk *chunk;
1139     SLGlobalData *slgd;
1140     struct globaldata *gd;
1141     int *kup;
1142     unsigned long size;
1143     SLChunk *bchunk;
1144     int rsignal;
1145 
1146     logmemory_quick(free_beg);
1147     gd = mycpu;
1148     slgd = &gd->gd_slab;
1149 
1150     if (ptr == NULL)
1151 	panic("trying to free NULL pointer");
1152 
1153     /*
1154      * Handle special 0-byte allocations
1155      */
1156     if (ptr == ZERO_LENGTH_PTR) {
1157 	logmemory(free_zero, ptr, type, -1UL, 0);
1158 	logmemory_quick(free_end);
1159 	return;
1160     }
1161 
1162     /*
1163      * Panic on bad malloc type
1164      */
1165     if (type->ks_magic != M_MAGIC)
1166 	panic("free: malloc type lacks magic");
1167 
1168     /*
1169      * Handle oversized allocations.  XXX we really should require that a
1170      * size be passed to free() instead of this nonsense.
1171      *
1172      * This code is never called via an ipi.
1173      */
1174     kup = btokup(ptr);
1175     if (*kup > 0) {
1176 	size = *kup << PAGE_SHIFT;
1177 	*kup = 0;
1178 #ifdef INVARIANTS
1179 	KKASSERT(sizeof(weirdary) <= size);
1180 	bcopy(weirdary, ptr, sizeof(weirdary));
1181 #endif
1182 	/*
1183 	 * NOTE: For oversized allocations we do not record the
1184 	 *	     originating cpu.  It gets freed on the cpu calling
1185 	 *	     kfree().  The statistics are in aggregate.
1186 	 *
1187 	 * note: XXX we have still inherited the interrupts-can't-block
1188 	 * assumption.  An interrupt thread does not bump
1189 	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
1190 	 * primarily until we can fix softupdate's assumptions about free().
1191 	 */
1192 	crit_enter();
1193 	--type->ks_inuse[gd->gd_cpuid];
1194 	type->ks_memuse[gd->gd_cpuid] -= size;
1195 	if (mycpu->gd_intr_nesting_level ||
1196 	    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
1197 	{
1198 	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
1199 	    z = (SLZone *)ptr;
1200 	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
1201 	    z->z_ChunkSize = size;
1202 
1203 	    LIST_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
1204 	    crit_exit();
1205 	} else {
1206 	    crit_exit();
1207 	    logmemory(free_ovsz, ptr, type, size, 0);
1208 	    kmem_slab_free(ptr, size);	/* may block */
1209 	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
1210 	}
1211 	logmemory_quick(free_end);
1212 	return;
1213     }
1214 
1215     /*
1216      * Zone case.  Figure out the zone based on the fact that it is
1217      * ZoneSize aligned.
1218      */
1219     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1220     kup = btokup(z);
1221     KKASSERT(*kup < 0);
1222     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1223 
1224     /*
1225      * If we do not own the zone then use atomic ops to free to the
1226      * remote cpu linked list and notify the target zone using a
1227      * passive message.
1228      *
1229      * The target zone cannot be deallocated while we own a chunk of it,
1230      * so the zone header's storage is stable until the very moment
1231      * we adjust z_RChunks.  After that we cannot safely dereference (z).
1232      *
1233      * (no critical section needed)
1234      */
1235     if (z->z_CpuGd != gd) {
1236 	/*
1237 	 * Making these adjustments now allows us to avoid passing (type)
1238 	 * to the remote cpu.  Note that ks_inuse/ks_memuse is being
1239 	 * adjusted on OUR cpu, not the zone cpu, but it should all still
1240 	 * sum up properly and cancel out.
1241 	 */
1242 	crit_enter();
1243 	--type->ks_inuse[gd->gd_cpuid];
1244 	type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
1245 	crit_exit();
1246 
1247 	/*
1248 	 * WARNING! This code competes with other cpus.  Once we
1249 	 *	    successfully link the chunk to RChunks the remote
1250 	 *	    cpu can rip z's storage out from under us.
1251 	 *
1252 	 *	    Bumping RCount prevents z's storage from getting
1253 	 *	    ripped out.
1254 	 */
1255 	rsignal = z->z_RSignal;
1256 	cpu_lfence();
1257 	if (rsignal)
1258 		atomic_add_int(&z->z_RCount, 1);
1259 
1260 	chunk = ptr;
1261 	for (;;) {
1262 	    bchunk = z->z_RChunks;
1263 	    cpu_ccfence();
1264 	    chunk->c_Next = bchunk;
1265 	    cpu_sfence();
1266 
1267 	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
1268 		break;
1269 	}
1270 
1271 	/*
1272 	 * We have to signal the remote cpu if our actions will cause
1273 	 * the remote zone to be placed back on ZoneAry, so the remote
1274 	 * cpu can move the zone back on.
1275 	 *
1276 	 * We only need to deal with NULL->non-NULL RChunk transitions
1277 	 * and only if z_RSignal is set.  We interlock by reading rsignal
1278 	 * before adding our chunk to RChunks.  This should result in
1279 	 * virtually no IPI traffic.
1280 	 *
1281 	 * We can use a passive IPI to reduce overhead even further.
1282 	 */
1283 	if (bchunk == NULL && rsignal) {
1284 		logmemory(free_request, ptr, type,
1285 			  (unsigned long)z->z_ChunkSize, 0);
1286 	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
1287 	    /* z can get ripped out from under us from this point on */
1288 	} else if (rsignal) {
1289 	    atomic_subtract_int(&z->z_RCount, 1);
1290 	    /* z can get ripped out from under us from this point on */
1291 	}
1292 	logmemory_quick(free_end);
1293 	return;
1294     }
1295 
1296     /*
1297      * kfree locally
1298      */
1299     logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);
1300 
1301     crit_enter();
1302     chunk = ptr;
1303     chunk_mark_free(z, chunk);
1304 
1305     /*
1306      * Put weird data into the memory to detect modifications after freeing,
1307      * illegal pointer use after freeing (we should fault on the odd address),
1308      * and so forth.  XXX needs more work, see the old malloc code.
1309      */
1310 #ifdef INVARIANTS
1311     if (z->z_ChunkSize < sizeof(weirdary))
1312 	bcopy(weirdary, chunk, z->z_ChunkSize);
1313     else
1314 	bcopy(weirdary, chunk, sizeof(weirdary));
1315 #endif
1316 
1317     /*
1318      * Add this free non-zero'd chunk to a linked list for reuse.  Add
1319      * to the front of the linked list so it is more likely to be
1320      * reallocated, since it is already in our L1 cache.
1321      */
1322 #ifdef INVARIANTS
1323     if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
1324 	panic("BADFREE %p", chunk);
1325 #endif
1326     chunk->c_Next = z->z_LChunks;
1327     z->z_LChunks = chunk;
1328     if (chunk->c_Next == NULL)
1329 	    z->z_LChunksp = &chunk->c_Next;
1330 
1331 #ifdef INVARIANTS
1332     if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
1333 	panic("BADFREE2");
1334 #endif
1335 
1336     /*
1337      * Bump the number of free chunks.  If it becomes non-zero the zone
1338      * must be added back onto the appropriate list.
1339      */
1340     if (z->z_NFree++ == 0) {
1341 	LIST_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1342     }
1343 
1344     --type->ks_inuse[z->z_Cpu];
1345     type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
1346 
1347     check_zone_free(slgd, z);
1348     logmemory_quick(free_end);
1349     crit_exit();
1350 }
1351 
1352 /*
1353  * Clean up slabs which are hanging around due to RChunks.  Called once every
1354  * 10 seconds on all cpus.
1355  */
1356 void
1357 slab_cleanup(void)
1358 {
1359     SLGlobalData *slgd = &mycpu->gd_slab;
1360     SLZone *z;
1361     int i;
1362 
1363     crit_enter();
1364     for (i = 0; i < NZONES; ++i) {
1365 	if ((z = LIST_FIRST(&slgd->ZoneAry[i])) == NULL)
1366 		continue;
1367 	z = LIST_NEXT(z, z_Entry);
1368 
1369 	/*
1370 	 * Scan zones starting with the second zone in each list.
1371 	 */
1372 	while (z) {
1373 	    /*
1374 	     * Shift all RChunks to the end of the LChunks list.  This is
1375 	     * an O(1) operation.
1376 	     *
1377 	     * Then free the zone if possible.
1378 	     */
1379 	    clean_zone_rchunks(z);
1380 	    z = check_zone_free(slgd, z);
1381 	}
1382     }
1383     crit_exit();
1384 }
1385 
1386 #if defined(INVARIANTS)
1387 
1388 /*
1389  * Helper routines for sanity checks
1390  */
1391 static
1392 void
1393 chunk_mark_allocated(SLZone *z, void *chunk)
1394 {
1395     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1396     __uint32_t *bitptr;
1397 
1398     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1399     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1400 	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
1401     bitptr = &z->z_Bitmap[bitdex >> 5];
1402     bitdex &= 31;
1403     KASSERT((*bitptr & (1 << bitdex)) == 0,
1404 	    ("memory chunk %p is already allocated!", chunk));
1405     *bitptr |= 1 << bitdex;
1406 }
1407 
1408 static
1409 void
1410 chunk_mark_free(SLZone *z, void *chunk)
1411 {
1412     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1413     __uint32_t *bitptr;
1414 
1415     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1416     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1417 	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1418     bitptr = &z->z_Bitmap[bitdex >> 5];
1419     bitdex &= 31;
1420     KASSERT((*bitptr & (1 << bitdex)) != 0,
1421 	    ("memory chunk %p is already free!", chunk));
1422     *bitptr &= ~(1 << bitdex);
1423 }
1424 
1425 #endif
1426 
1427 /*
1428  * kmem_slab_alloc()
1429  *
1430  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1431  *	specified alignment.  M_* flags are expected in the flags field.
1432  *
1433  *	Alignment must be a multiple of PAGE_SIZE.
1434  *
1435  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1436  *	but when we move zalloc() over to use this function as its backend
1437  *	we will have to switch to kreserve/krelease and call reserve(0)
1438  *	after the new space is made available.
1439  *
1440  *	Interrupt code which has preempted other code is not allowed to
1441  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1442  *	non-preemptively or blocks and then runs non-preemptively, then
1443  *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
1444  */
1445 static void *
1446 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1447 {
1448     vm_size_t i;
1449     vm_offset_t addr;
1450     int count, vmflags, base_vmflags;
1451     vm_page_t mbase = NULL;
1452     vm_page_t m;
1453     thread_t td;
1454 
1455     size = round_page(size);
1456     addr = vm_map_min(&kernel_map);
1457 
1458     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1459     crit_enter();
1460     vm_map_lock(&kernel_map);
1461     if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
1462 	vm_map_unlock(&kernel_map);
1463 	if ((flags & M_NULLOK) == 0)
1464 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1465 	vm_map_entry_release(count);
1466 	crit_exit();
1467 	return(NULL);
1468     }
1469 
1470     /*
1471      * kernel_object maps 1:1 to kernel_map.
1472      */
1473     vm_object_hold(&kernel_object);
1474     vm_object_reference_locked(&kernel_object);
1475     vm_map_insert(&kernel_map, &count,
1476 		  &kernel_object, NULL,
1477 		  addr, addr, addr + size,
1478 		  VM_MAPTYPE_NORMAL,
1479 		  VM_PROT_ALL, VM_PROT_ALL,
1480 		  0);
1481     vm_object_drop(&kernel_object);
1482     vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1483     vm_map_unlock(&kernel_map);
1484 
1485     td = curthread;
1486 
1487     base_vmflags = 0;
1488     if (flags & M_ZERO)
1489         base_vmflags |= VM_ALLOC_ZERO;
1490     if (flags & M_USE_RESERVE)
1491 	base_vmflags |= VM_ALLOC_SYSTEM;
1492     if (flags & M_USE_INTERRUPT_RESERVE)
1493         base_vmflags |= VM_ALLOC_INTERRUPT;
1494     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
1495 	panic("kmem_slab_alloc: bad flags %08x (%p)",
1496 	      flags, ((int **)&size)[-1]);
1497     }
1498 
1499     /*
1500      * Allocate the pages.  Do not mess with the PG_ZERO flag or map
1501      * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
1502      *
1503      * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1504      * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1505      * implied in this case), though I'm not sure if we really need to
1506      * do that.
1507      */
1508     vmflags = base_vmflags;
1509     if (flags & M_WAITOK) {
1510 	if (td->td_preempted)
1511 	    vmflags |= VM_ALLOC_SYSTEM;
1512 	else
1513 	    vmflags |= VM_ALLOC_NORMAL;
1514     }
1515 
1516     vm_object_hold(&kernel_object);
1517     for (i = 0; i < size; i += PAGE_SIZE) {
1518 	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1519 	if (i == 0)
1520 		mbase = m;
1521 
1522 	/*
1523 	 * If the allocation failed we either return NULL or we retry.
1524 	 *
1525 	 * If M_WAITOK is specified we wait for more memory and retry.
1526 	 * If M_WAITOK is specified from a preemption we yield instead of
1527 	 * waiting.  Livelock will not occur because the interrupt thread
1528 	 * will not be preempting anyone the second time around after the
1529 	 * yield.
1530 	 */
1531 	if (m == NULL) {
1532 	    if (flags & M_WAITOK) {
1533 		if (td->td_preempted) {
1534 		    lwkt_switch();
1535 		} else {
1536 		    vm_wait(0);
1537 		}
1538 		i -= PAGE_SIZE;	/* retry */
1539 		continue;
1540 	    }
1541 	    break;
1542 	}
1543     }
1544 
1545     /*
1546      * Check and deal with an allocation failure
1547      */
1548     if (i != size) {
1549 	while (i != 0) {
1550 	    i -= PAGE_SIZE;
1551 	    m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1552 	    /* page should already be busy */
1553 	    vm_page_free(m);
1554 	}
1555 	vm_map_lock(&kernel_map);
1556 	vm_map_delete(&kernel_map, addr, addr + size, &count);
1557 	vm_map_unlock(&kernel_map);
1558 	vm_object_drop(&kernel_object);
1559 
1560 	vm_map_entry_release(count);
1561 	crit_exit();
1562 	return(NULL);
1563     }
1564 
1565     /*
1566      * Success!
1567      *
1568      * NOTE: The VM pages are still busied.  mbase points to the first one
1569      *	     but we have to iterate via vm_page_next()
1570      */
1571     vm_object_drop(&kernel_object);
1572     crit_exit();
1573 
1574     /*
1575      * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1576      */
1577     m = mbase;
1578     i = 0;
1579 
1580     while (i < size) {
1581 	/*
1582 	 * page should already be busy
1583 	 */
1584 	m->valid = VM_PAGE_BITS_ALL;
1585 	vm_page_wire(m);
1586 	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC,
1587 		   1, NULL);
1588 	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1589 	    bzero((char *)addr + i, PAGE_SIZE);
1590 	vm_page_flag_clear(m, PG_ZERO);
1591 	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
1592 	vm_page_flag_set(m, PG_REFERENCED);
1593 	vm_page_wakeup(m);
1594 
1595 	i += PAGE_SIZE;
1596 	vm_object_hold(&kernel_object);
1597 	m = vm_page_next(m);
1598 	vm_object_drop(&kernel_object);
1599     }
1600     smp_invltlb();
1601     vm_map_entry_release(count);
1602     atomic_add_long(&SlabsAllocated, 1);
1603     return((void *)addr);
1604 }
1605 
1606 /*
1607  * kmem_slab_free()
1608  */
1609 static void
1610 kmem_slab_free(void *ptr, vm_size_t size)
1611 {
1612     crit_enter();
1613     vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1614     atomic_add_long(&SlabsFreed, 1);
1615     crit_exit();
1616 }
1617 
1618 void *
1619 kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
1620     int flags)
1621 {
1622 #if (__VM_CACHELINE_SIZE == 32)
1623 #define CAN_CACHEALIGN(sz)	((sz) >= 256)
1624 #elif (__VM_CACHELINE_SIZE == 64)
1625 #define CAN_CACHEALIGN(sz)	((sz) >= 512)
1626 #elif (__VM_CACHELINE_SIZE == 128)
1627 #define CAN_CACHEALIGN(sz)	((sz) >= 1024)
1628 #else
1629 #error "unsupported cacheline size"
1630 #endif
1631 
1632 	void *ret;
1633 
1634 	if (size_alloc < __VM_CACHELINE_SIZE)
1635 		size_alloc = __VM_CACHELINE_SIZE;
1636 	else if (!CAN_CACHEALIGN(size_alloc))
1637 		flags |= M_POWEROF2;
1638 
1639 	ret = kmalloc(size_alloc, type, flags);
1640 	KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
1641 	    ("%p(%lu) not cacheline %d aligned",
1642 	     ret, size_alloc, __VM_CACHELINE_SIZE));
1643 	return ret;
1644 
1645 #undef CAN_CACHEALIGN
1646 }
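/*
 * For example, with a 64 byte cache line kmalloc_cachealign(40, ...) is
 * bumped to a 64 byte chunk and kmalloc_cachealign(300, ...) is rounded
 * up via M_POWEROF2 to a 512 byte chunk; both results are power-of-2
 * sized and therefore cache line aligned.
 */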
1647