xref: /dragonfly/sys/kern/kern_slaballoc.c (revision b7367ef6)
1 /*
2  * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
3  *
4  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.50 2007/06/07 20:34:14 dillon Exp $
37  *
38  * This module implements a slab allocator drop-in replacement for the
39  * kernel malloc().
40  *
41  * A slab allocator reserves a ZONE for each chunk size, then lays the
42  * chunks out in an array within the zone.  Allocation and deallocation
43  * are nearly instantaneous, and fragmentation/overhead losses are limited
44  * to a fixed worst-case amount.
45  *
46  * The downside of this slab implementation is the reserved VM: the zone
47  * size multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
48  * In a kernel implementation all this memory will be physical so
49  * the zone size is adjusted downward on machines with less physical
50  * memory.  The upside is that overhead is bounded... this is the *worst*
51  * case overhead.
52  *
53  * Slab management is done on a per-cpu basis and no locking or mutexes
54  * are required, only a critical section.  When one cpu frees memory
55  * belonging to another cpu's slab manager an asynchronous IPI message
56  * will be queued to execute the operation.   In addition, both the
57  * high level slab allocator and the low level zone allocator optimize
58  * M_ZERO requests, and the slab allocator does not have to pre-initialize
59  * the linked list of chunks.
60  *
61  * XXX Balancing is needed between cpus.  Balance will be handled through
62  * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63  *
64  * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65  * the new zone should be restricted to M_USE_RESERVE requests only.
66  *
67  *	Alloc Size	Chunking        Number of zones
68  *	0-127		8		16
69  *	128-255		16		8
70  *	256-511		32		8
71  *	512-1023	64		8
72  *	1024-2047	128		8
73  *	2048-4095	256		8
74  *	4096-8191	512		8
75  *	8192-16383	1024		8
76  *	16384-32767	2048		8
77  *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383; worked examples follow this comment)
78  *
79  *	Allocations >= ZoneLimit go directly to kmem.
80  *
81  *			API REQUIREMENTS AND SIDE EFFECTS
82  *
83  *    To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
84  *    have remained compatible with the following API requirements:
85  *
86  *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
87  *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
88  *    + malloc(0) is allowed and returns non-NULL (ahc driver)
89  *    + ability to allocate arbitrarily large chunks of memory
90  */
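/*
 * Worked examples for the chunking table above (illustrative):
 *
 *	- a 100 byte request falls in the 0-127 band (8 byte chunking) and is
 *	  rounded up to a 104 byte chunk.
 *	- a 3000 byte request falls in the 2048-4095 band (256 byte chunking)
 *	  and is rounded up to a 3072 byte chunk.
 *	- a 100KB request is >= ZoneLimit for any supported zone size and is
 *	  handed directly to the kmem backend.
 */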
91 
92 #include "opt_vm.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kernel.h>
97 #include <sys/slaballoc.h>
98 #include <sys/mbuf.h>
99 #include <sys/vmmeter.h>
100 #include <sys/lock.h>
101 #include <sys/thread.h>
102 #include <sys/globaldata.h>
103 #include <sys/sysctl.h>
104 #include <sys/ktr.h>
105 
106 #include <vm/vm.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_kern.h>
109 #include <vm/vm_extern.h>
110 #include <vm/vm_object.h>
111 #include <vm/pmap.h>
112 #include <vm/vm_map.h>
113 #include <vm/vm_page.h>
114 #include <vm/vm_pageout.h>
115 
116 #include <machine/cpu.h>
117 
118 #include <sys/thread2.h>
119 
120 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
121 
122 #define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
123 #define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + 	\
124 			sizeof(int))
125 
126 #if !defined(KTR_MEMORY)
127 #define KTR_MEMORY	KTR_ALL
128 #endif
129 KTR_INFO_MASTER(memory);
130 KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
131 KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
132 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
133 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
134 KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
135 #ifdef SMP
136 KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
137 KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
138 #endif
139 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
140 KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
141 KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);
142 
143 #define logmemory(name, ptr, type, size, flags)				\
144 	KTR_LOG(memory_ ## name, ptr, type, size, flags)
145 #define logmemory_quick(name)						\
146 	KTR_LOG(memory_ ## name)
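/*
 * For example, logmemory(free_chunk, ptr, type, size, 0) expands to
 * KTR_LOG(memory_free_chunk, ptr, type, size, 0), logging against the
 * KTR_INFO(..., free_chunk, ...) probe declared above with the
 * MEMORY_STRING format.
 */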
147 
148 /*
149  * Fixed globals (not per-cpu)
150  */
151 static int ZoneSize;
152 static int ZoneLimit;
153 static int ZonePageCount;
154 static int ZoneMask;
155 struct malloc_type *kmemstatistics;	/* exported to vmstat */
156 static struct kmemusage *kmemusage;
157 static int32_t weirdary[16];
158 
159 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
160 static void kmem_slab_free(void *ptr, vm_size_t bytes);
161 #if defined(INVARIANTS)
162 static void chunk_mark_allocated(SLZone *z, void *chunk);
163 static void chunk_mark_free(SLZone *z, void *chunk);
164 #endif
165 
166 /*
167  * Misc constants.  Note that allocations that are exact multiples of
168  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
169  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
170  */
171 #define MIN_CHUNK_SIZE		8		/* in bytes */
172 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
173 #define ZONE_RELS_THRESH	2		/* threshold number of zones */
174 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
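/*
 * Illustrative note: masking a pointer with IN_SAME_PAGE_MASK keeps the
 * page-frame bits plus the low MIN_CHUNK_MASK bits, so two 8-byte aligned
 * chunk pointers compare equal under the mask exactly when they lie in the
 * same page.  The DIAGNOSTIC check in kmalloc() below uses this to verify
 * that a chunk's c_Next pointer stays within the chunk's own page.
 */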
175 
176 /*
177  * The WEIRD_ADDR is used as known text to copy into free objects to
178  * try to create deterministic failure cases if the data is accessed after
179  * free.
180  */
181 #define WEIRD_ADDR      0xdeadc0de
182 #define MAX_COPY        sizeof(weirdary)
183 #define ZERO_LENGTH_PTR	((void *)-8)
184 
185 /*
186  * Misc global malloc buckets
187  */
188 
189 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
190 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
191 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
192 
193 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
194 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
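/*
 * Illustrative sketch (hypothetical M_EXAMPLE bucket): a subsystem would
 * normally declare its own bucket in a header and define it in one source
 * file, then pass it to kmalloc()/kfree() so its usage is tracked separately.
 */
#if 0
MALLOC_DECLARE(M_EXAMPLE);			/* in a header */
MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem allocations");
#endif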
195 
196 /*
197  * Initialize the slab memory allocator.  We have to choose a zone size based
198  * on available physical memory.  We choose a zone size which is approximately
199  * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
200  * 128K.  The zone size is limited to the bounds set in slaballoc.h
201  * (typically 32K min, 128K max).
202  */
203 static void kmeminit(void *dummy);
204 
205 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)
206 
207 #ifdef INVARIANTS
208 /*
209  * If enabled any memory allocated without M_ZERO is initialized to -1.
210  */
211 static int  use_malloc_pattern;
212 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
213 		&use_malloc_pattern, 0, "");
214 #endif
215 
216 static void
217 kmeminit(void *dummy)
218 {
219     vm_poff_t limsize;
220     int usesize;
221     int i;
222     vm_pindex_t npg;
223 
224     limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
225     if (limsize > KvaSize)
226 	limsize = KvaSize;
227 
228     usesize = (int)(limsize / 1024);	/* convert to KB */
229 
230     ZoneSize = ZALLOC_MIN_ZONE_SIZE;
231     while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
232 	ZoneSize <<= 1;
233     ZoneLimit = ZoneSize / 4;
234     if (ZoneLimit > ZALLOC_ZONE_LIMIT)
235 	ZoneLimit = ZALLOC_ZONE_LIMIT;
236     ZoneMask = ZoneSize - 1;
237     ZonePageCount = ZoneSize / PAGE_SIZE;
238 
239     npg = KvaSize / PAGE_SIZE;
240     kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
241 				PAGE_SIZE, M_WAITOK|M_ZERO);
242 
243     for (i = 0; i < arysize(weirdary); ++i)
244 	weirdary[i] = WEIRD_ADDR;
245 
246     if (bootverbose)
247 	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
248 }
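/*
 * Illustrative arithmetic, assuming the typical 32K..128K bounds quoted
 * above and ignoring the KvaSize clamp: a 512MB machine gives usesize =
 * 524288 and the doubling loop stops at ZoneSize = 128K (the maximum),
 * while a 64MB machine gives usesize = 65536 and ZoneSize stays at the
 * 32K minimum.
 */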
249 
250 /*
251  * Initialize a malloc type tracking structure.
252  */
253 void
254 malloc_init(void *data)
255 {
256     struct malloc_type *type = data;
257     vm_poff_t limsize;
258 
259     if (type->ks_magic != M_MAGIC)
260 	panic("malloc type lacks magic");
261 
262     if (type->ks_limit != 0)
263 	return;
264 
265     if (vmstats.v_page_count == 0)
266 	panic("malloc_init not allowed before vm init");
267 
268     limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
269     if (limsize > KvaSize)
270 	limsize = KvaSize;
271     type->ks_limit = limsize / 10;
272 
273     type->ks_next = kmemstatistics;
274     kmemstatistics = type;
275 }
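/*
 * Note that kmalloc() below also calls malloc_init() lazily when it sees a
 * type whose ks_limit is still zero, so a malloc_type is usable on first
 * allocation even if nothing initialized it explicitly.
 */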
276 
277 void
278 malloc_uninit(void *data)
279 {
280     struct malloc_type *type = data;
281     struct malloc_type *t;
282 #ifdef INVARIANTS
283     int i;
284     long ttl;
285 #endif
286 
287     if (type->ks_magic != M_MAGIC)
288 	panic("malloc type lacks magic");
289 
290     if (vmstats.v_page_count == 0)
291 	panic("malloc_uninit not allowed before vm init");
292 
293     if (type->ks_limit == 0)
294 	panic("malloc_uninit on uninitialized type");
295 
296 #ifdef INVARIANTS
297     /*
298      * memuse is only correct in aggregation.  Due to memory being allocated
299      * on one cpu and freed on another individual array entries may be
300      * negative or positive (canceling each other out).
301      */
302     for (i = ttl = 0; i < ncpus; ++i)
303 	ttl += type->ks_memuse[i];
304     if (ttl) {
305 	kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
306 	    ttl, type->ks_shortdesc);
307     }
308 #endif
309     if (type == kmemstatistics) {
310 	kmemstatistics = type->ks_next;
311     } else {
312 	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
313 	    if (t->ks_next == type) {
314 		t->ks_next = type->ks_next;
315 		break;
316 	    }
317 	}
318     }
319     type->ks_next = NULL;
320     type->ks_limit = 0;
321 }
322 
323 /*
324  * Calculate the zone index for the allocation request size and set the
325  * allocation request size to that particular zone's chunk size.
326  */
327 static __inline int
328 zoneindex(unsigned long *bytes)
329 {
330     unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
331     if (n < 128) {
332 	*bytes = n = (n + 7) & ~7;
333 	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
334     }
335     if (n < 256) {
336 	*bytes = n = (n + 15) & ~15;
337 	return(n / 16 + 7);
338     }
339     if (n < 8192) {
340 	if (n < 512) {
341 	    *bytes = n = (n + 31) & ~31;
342 	    return(n / 32 + 15);
343 	}
344 	if (n < 1024) {
345 	    *bytes = n = (n + 63) & ~63;
346 	    return(n / 64 + 23);
347 	}
348 	if (n < 2048) {
349 	    *bytes = n = (n + 127) & ~127;
350 	    return(n / 128 + 31);
351 	}
352 	if (n < 4096) {
353 	    *bytes = n = (n + 255) & ~255;
354 	    return(n / 256 + 39);
355 	}
356 	*bytes = n = (n + 511) & ~511;
357 	return(n / 512 + 47);
358     }
359 #if ZALLOC_ZONE_LIMIT > 8192
360     if (n < 16384) {
361 	*bytes = n = (n + 1023) & ~1023;
362 	return(n / 1024 + 55);
363     }
364 #endif
365 #if ZALLOC_ZONE_LIMIT > 16384
366     if (n < 32768) {
367 	*bytes = n = (n + 2047) & ~2047;
368 	return(n / 2048 + 63);
369     }
370 #endif
371     panic("Unexpected byte count %d", n);
372     return(0);
373 }
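/*
 * Worked examples for zoneindex() (illustrative sketch; the hypothetical
 * function below is not compiled).  The asserted values follow from the
 * chunking table in the header comment.
 */
#if 0
static void
zoneindex_examples(void)
{
    unsigned long bytes;

    bytes = 100;		/* 0-127 band, 8 byte chunking */
    KKASSERT(zoneindex(&bytes) == 12 && bytes == 104);

    bytes = 200;		/* 128-255 band, 16 byte chunking */
    KKASSERT(zoneindex(&bytes) == 20 && bytes == 208);

    bytes = 3000;		/* 2048-4095 band, 256 byte chunking */
    KKASSERT(zoneindex(&bytes) == 51 && bytes == 3072);
}
#endif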
374 
375 /*
376  * malloc()	(SLAB ALLOCATOR)
377  *
378  *	Allocate memory via the slab allocator.  If the request is too large,
379  *	or if it page-aligned beyond a certain size, we fall back to the
380  *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
381  *	&SlabMisc if you don't care.
382  *
383  *	M_RNOWAIT	- don't block.
384  *	M_NULLOK	- return NULL instead of blocking.
385  *	M_ZERO		- zero the returned memory.
386  *	M_USE_RESERVE	- allow greater drawdown of the free list
387  *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
388  *
389  * MPSAFE
390  */
391 
392 void *
393 kmalloc(unsigned long size, struct malloc_type *type, int flags)
394 {
395     SLZone *z;
396     SLChunk *chunk;
397     SLGlobalData *slgd;
398     struct globaldata *gd;
399     int zi;
400 #ifdef INVARIANTS
401     int i;
402 #endif
403 
404     logmemory_quick(malloc_beg);
405     gd = mycpu;
406     slgd = &gd->gd_slab;
407 
408     /*
409      * XXX silly to have this in the critical path.
410      */
411     if (type->ks_limit == 0) {
412 	crit_enter();
413 	if (type->ks_limit == 0)
414 	    malloc_init(type);
415 	crit_exit();
416     }
417     ++type->ks_calls;
418 
419     /*
420      * Handle the case where the limit is reached.  Panic if we can't return
421      * NULL.  The original malloc code looped, but this tended to
422      * simply deadlock the computer.
423      *
424      * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
425      * to determine if a more complete limit check should be done.  The
426      * actual memory use is tracked via ks_memuse[cpu].
427      */
428     while (type->ks_loosememuse >= type->ks_limit) {
429 	int i;
430 	long ttl;
431 
432 	for (i = ttl = 0; i < ncpus; ++i)
433 	    ttl += type->ks_memuse[i];
434 	type->ks_loosememuse = ttl;	/* not MP synchronized */
435 	if (ttl >= type->ks_limit) {
436 	    if (flags & M_NULLOK) {
437 		logmemory(malloc, NULL, type, size, flags);
438 		return(NULL);
439 	    }
440 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
441 	}
442     }
443 
444     /*
445      * Handle the degenerate size == 0 case.  Yes, this does happen.
446      * Return a special pointer.  This is to maintain compatibility with
447      * the original malloc implementation.  Certain devices, such as the
448      * adaptec driver, not only allocate 0 bytes, they check for NULL and
449      * also realloc() later on.  Joy.
450      */
451     if (size == 0) {
452 	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
453 	return(ZERO_LENGTH_PTR);
454     }
455 
456     /*
457      * Handle hysteresis from prior frees here in malloc().  We cannot
458      * safely manipulate the kernel_map in free() due to free() possibly
459      * being called via an IPI message or from sensitive interrupt code.
460      */
461     while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
462 	crit_enter();
463 	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
464 	    z = slgd->FreeZones;
465 	    slgd->FreeZones = z->z_Next;
466 	    --slgd->NFreeZones;
467 	    kmem_slab_free(z, ZoneSize);	/* may block */
468 	}
469 	crit_exit();
470     }
471     /*
472      * XXX handle oversized frees that were queued from free().
473      */
474     while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
475 	crit_enter();
476 	if ((z = slgd->FreeOvZones) != NULL) {
477 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
478 	    slgd->FreeOvZones = z->z_Next;
479 	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
480 	}
481 	crit_exit();
482     }
483 
484     /*
485      * Handle large allocations directly.  There should not be very many of
486      * these so performance is not a big issue.
487      *
488      * The backend allocator is pretty nasty on an SMP system.  Use the
489      * slab allocator for one and two page-sized chunks even though we lose
490      * some efficiency.  XXX maybe fix mmio and the elf loader instead.
491      */
492     if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
493 	struct kmemusage *kup;
494 
495 	size = round_page(size);
496 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
497 	if (chunk == NULL) {
498 	    logmemory(malloc, NULL, type, size, flags);
499 	    return(NULL);
500 	}
501 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
502 	flags |= M_PASSIVE_ZERO;
503 	kup = btokup(chunk);
504 	kup->ku_pagecnt = size / PAGE_SIZE;
505 	kup->ku_cpu = gd->gd_cpuid;
506 	crit_enter();
507 	goto done;
508     }
509 
510     /*
511      * Attempt to allocate out of an existing zone.  First try the free list,
512      * then allocate out of unallocated space.  If we find a good zone move
513      * it to the head of the list so later allocations find it quickly
514      * (we might have thousands of zones in the list).
515      *
516      * Note: zoneindex() will panic if size is too large.
517      */
518     zi = zoneindex(&size);
519     KKASSERT(zi < NZONES);
520     crit_enter();
521     if ((z = slgd->ZoneAry[zi]) != NULL) {
522 	KKASSERT(z->z_NFree > 0);
523 
524 	/*
525 	 * Remove us from the ZoneAry[] when we become empty
526 	 */
527 	if (--z->z_NFree == 0) {
528 	    slgd->ZoneAry[zi] = z->z_Next;
529 	    z->z_Next = NULL;
530 	}
531 
532 	/*
533 	 * Locate a chunk in a free page.  This attempts to localize
534 	 * reallocations into earlier pages without us having to sort
535 	 * the chunk list.  A chunk may still overlap a page boundary.
536 	 */
537 	while (z->z_FirstFreePg < ZonePageCount) {
538 	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
539 #ifdef DIAGNOSTIC
540 		/*
541 		 * Diagnostic: c_Next is not total garbage.
542 		 */
543 		KKASSERT(chunk->c_Next == NULL ||
544 			((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
545 			((intptr_t)chunk & IN_SAME_PAGE_MASK));
546 #endif
547 #ifdef INVARIANTS
548 		if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
549 			panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
550 		if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
551 			panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
552 		chunk_mark_allocated(z, chunk);
553 #endif
554 		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
555 		goto done;
556 	    }
557 	    ++z->z_FirstFreePg;
558 	}
559 
560 	/*
561 	 * No chunks are available but NFree said we had some memory, so
562 	 * it must be available in the never-before-used-memory area
563 	 * governed by UIndex.  The consequences are very serious if our zone
564 	 * got corrupted, so we use an explicit panic rather than a KASSERT.
565 	 */
566 	if (z->z_UIndex + 1 != z->z_NMax)
567 	    z->z_UIndex = z->z_UIndex + 1;
568 	else
569 	    z->z_UIndex = 0;
570 	if (z->z_UIndex == z->z_UEndIndex)
571 	    panic("slaballoc: corrupted zone");
572 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
573 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
574 	    flags &= ~M_ZERO;
575 	    flags |= M_PASSIVE_ZERO;
576 	}
577 #if defined(INVARIANTS)
578 	chunk_mark_allocated(z, chunk);
579 #endif
580 	goto done;
581     }
582 
583     /*
584      * If all zones are exhausted we need to allocate a new zone for this
585      * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
586      * UAlloc use above with regard to M_ZERO.  Note that when we are reusing
587      * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
588      * we do not pre-zero it because we do not want to mess up the L1 cache.
589      *
590      * At least one subsystem, the tty code (see CROUND) expects power-of-2
591      * allocations to be power-of-2 aligned.  We maintain compatibility by
592      * adjusting the base offset below.
593      */
594     {
595 	int off;
596 
597 	if ((z = slgd->FreeZones) != NULL) {
598 	    slgd->FreeZones = z->z_Next;
599 	    --slgd->NFreeZones;
600 	    bzero(z, sizeof(SLZone));
601 	    z->z_Flags |= SLZF_UNOTZEROD;
602 	} else {
603 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
604 	    if (z == NULL)
605 		goto fail;
606 	}
607 
608 	/*
609 	 * How big is the base structure?
610 	 */
611 #if defined(INVARIANTS)
612 	/*
613 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
614 	 * complicated, so we do not attempt one.
615 	 */
616 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
617 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
618 #else
619 	off = sizeof(SLZone);
620 #endif
621 
622 	/*
623 	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
624 	 * Otherwise just 8-byte align the data.
625 	 */
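	/*
	 * Note: ((size | (size - 1)) + 1 == (size << 1)) holds only when
	 * size is a power of 2, e.g. size 64: (64|63)+1 == 128 == 64<<1,
	 * whereas size 96: (96|95)+1 == 128 != 192.
	 */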
626 	if ((size | (size - 1)) + 1 == (size << 1))
627 	    off = (off + size - 1) & ~(size - 1);
628 	else
629 	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
630 	z->z_Magic = ZALLOC_SLAB_MAGIC;
631 	z->z_ZoneIndex = zi;
632 	z->z_NMax = (ZoneSize - off) / size;
633 	z->z_NFree = z->z_NMax - 1;
634 	z->z_BasePtr = (char *)z + off;
635 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
636 	z->z_ChunkSize = size;
637 	z->z_FirstFreePg = ZonePageCount;
638 	z->z_CpuGd = gd;
639 	z->z_Cpu = gd->gd_cpuid;
640 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
641 	z->z_Next = slgd->ZoneAry[zi];
642 	slgd->ZoneAry[zi] = z;
643 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
644 	    flags &= ~M_ZERO;	/* already zero'd */
645 	    flags |= M_PASSIVE_ZERO;
646 	}
647 #if defined(INVARIANTS)
648 	chunk_mark_allocated(z, chunk);
649 #endif
650 
651 	/*
652 	 * Slide the base index for initial allocations out of the next
653 	 * zone we create so we do not over-weight the lower part of the
654 	 * cpu memory caches.
655 	 */
656 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
657 				& (ZALLOC_MAX_ZONE_SIZE - 1);
658     }
659 done:
660     ++type->ks_inuse[gd->gd_cpuid];
661     type->ks_memuse[gd->gd_cpuid] += size;
662     type->ks_loosememuse += size;	/* not MP synchronized */
663     crit_exit();
664     if (flags & M_ZERO)
665 	bzero(chunk, size);
666 #ifdef INVARIANTS
667     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
668 	if (use_malloc_pattern) {
669 	    for (i = 0; i < size; i += sizeof(int)) {
670 		*(int *)((char *)chunk + i) = -1;
671 	    }
672 	}
673 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
674     }
675 #endif
676     logmemory(malloc, chunk, type, size, flags);
677     return(chunk);
678 fail:
679     crit_exit();
680     logmemory(malloc, NULL, type, size, flags);
681     return(NULL);
682 }
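/*
 * Usage sketch for the flags documented above (illustrative; the
 * hypothetical function below is not compiled).  M_TEMP is one of the
 * global buckets defined near the top of this file; real consumers
 * normally use their own MALLOC_DEFINE'd type.
 */
#if 0
static void
kmalloc_usage_example(void)
{
    char *buf;

    /* blocking allocation, returned memory is zero'd */
    buf = kmalloc(512, M_TEMP, M_WAITOK | M_ZERO);
    kfree(buf, M_TEMP);

    /* non-blocking allocation, may return NULL */
    buf = kmalloc(512, M_TEMP, M_RNOWAIT | M_NULLOK);
    if (buf != NULL)
	kfree(buf, M_TEMP);
}
#endif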
683 
684 /*
685  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
686  *
687  * Generally speaking this routine is not called very often and we do
688  * not attempt to optimize it beyond reusing the same pointer if the
689  * new size fits within the chunking of the old pointer's zone.
690  */
691 void *
692 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
693 {
694     SLZone *z;
695     void *nptr;
696     unsigned long osize;
697 
698     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
699 
700     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
701 	return(kmalloc(size, type, flags));
702     if (size == 0) {
703 	kfree(ptr, type);
704 	return(NULL);
705     }
706 
707     /*
708      * Handle oversized allocations.  XXX we really should require that a
709      * size be passed to free() instead of this nonsense.
710      */
711     {
712 	struct kmemusage *kup;
713 
714 	kup = btokup(ptr);
715 	if (kup->ku_pagecnt) {
716 	    osize = kup->ku_pagecnt << PAGE_SHIFT;
717 	    if (osize == round_page(size))
718 		return(ptr);
719 	    if ((nptr = kmalloc(size, type, flags)) == NULL)
720 		return(NULL);
721 	    bcopy(ptr, nptr, min(size, osize));
722 	    kfree(ptr, type);
723 	    return(nptr);
724 	}
725     }
726 
727     /*
728      * Get the original allocation's zone.  If the new request winds up
729      * using the same chunk size we do not have to do anything.
730      */
731     z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
732     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
733 
734     zoneindex(&size);
735     if (z->z_ChunkSize == size)
736 	return(ptr);
737 
738     /*
739      * Allocate memory for the new request size.  Note that zoneindex has
740      * already adjusted the request size to the appropriate chunk size, which
741      * should optimize our bcopy().  Then copy and return the new pointer.
742      */
743     if ((nptr = kmalloc(size, type, flags)) == NULL)
744 	return(NULL);
745     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
746     kfree(ptr, type);
747     return(nptr);
748 }
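/*
 * Usage sketch for krealloc() (illustrative; the hypothetical function
 * below is not compiled).  As described above, if the new size maps to the
 * old pointer's chunk size the same pointer is returned.
 */
#if 0
static void
krealloc_usage_example(void)
{
    char *buf;

    buf = kmalloc(100, M_TEMP, M_WAITOK);	 /* 104 byte chunk */
    buf = krealloc(buf, 104, M_TEMP, M_WAITOK);	 /* same chunk size, same ptr */
    buf = krealloc(buf, 4000, M_TEMP, M_WAITOK); /* larger chunk, data copied */
    kfree(buf, M_TEMP);
}
#endif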
749 
750 /*
751  * Allocate a copy of the specified string.
752  *
753  * (MP SAFE) (MAY BLOCK)
754  */
755 char *
756 kstrdup(const char *str, struct malloc_type *type)
757 {
758     int zlen;	/* length inclusive of terminating NUL */
759     char *nstr;
760 
761     if (str == NULL)
762 	return(NULL);
763     zlen = strlen(str) + 1;
764     nstr = kmalloc(zlen, type, M_WAITOK);
765     bcopy(str, nstr, zlen);
766     return(nstr);
767 }
768 
769 #ifdef SMP
770 /*
771  * free_remote()	(SLAB ALLOCATOR)
772  *
773  *	IPI callback: re-issue a kfree() on the cpu that owns the chunk's zone.
774  */
775 static
776 void
777 free_remote(void *ptr)
778 {
779     logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
780     kfree(ptr, *(struct malloc_type **)ptr);
781 }
782 
783 #endif
784 
785 /*
786  * free (SLAB ALLOCATOR)
787  *
788  * Free a memory block previously allocated by malloc.  Note that we do not
789  * attempt to update ks_loosememuse as MP races could prevent us from
790  * checking memory limits in malloc.
791  *
792  * MPSAFE
793  */
794 void
795 kfree(void *ptr, struct malloc_type *type)
796 {
797     SLZone *z;
798     SLChunk *chunk;
799     SLGlobalData *slgd;
800     struct globaldata *gd;
801     int pgno;
802 
803     logmemory_quick(free_beg);
804     gd = mycpu;
805     slgd = &gd->gd_slab;
806 
807     if (ptr == NULL)
808 	panic("trying to free NULL pointer");
809 
810     /*
811      * Handle special 0-byte allocations
812      */
813     if (ptr == ZERO_LENGTH_PTR) {
814 	logmemory(free_zero, ptr, type, -1, 0);
815 	logmemory_quick(free_end);
816 	return;
817     }
818 
819     /*
820      * Handle oversized allocations.  XXX we really should require that a
821      * size be passed to free() instead of this nonsense.
822      *
823      * This code is never called via an ipi.
824      */
825     {
826 	struct kmemusage *kup;
827 	unsigned long size;
828 
829 	kup = btokup(ptr);
830 	if (kup->ku_pagecnt) {
831 	    size = kup->ku_pagecnt << PAGE_SHIFT;
832 	    kup->ku_pagecnt = 0;
833 #ifdef INVARIANTS
834 	    KKASSERT(sizeof(weirdary) <= size);
835 	    bcopy(weirdary, ptr, sizeof(weirdary));
836 #endif
837 	    /*
838 	     * note: we always adjust our cpu's slot, not the originating
839 	     * cpu (kup->ku_cpu).  The statistics are in aggregate.
840 	     *
841 	     * note: XXX we have still inherited the interrupts-can't-block
842 	     * assumption.  An interrupt thread does not bump
843 	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
844 	     * primarily until we can fix softupdates' assumptions about free().
845 	     */
846 	    crit_enter();
847 	    --type->ks_inuse[gd->gd_cpuid];
848 	    type->ks_memuse[gd->gd_cpuid] -= size;
849 	    if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
850 		logmemory(free_ovsz_delayed, ptr, type, size, 0);
851 		z = (SLZone *)ptr;
852 		z->z_Magic = ZALLOC_OVSZ_MAGIC;
853 		z->z_Next = slgd->FreeOvZones;
854 		z->z_ChunkSize = size;
855 		slgd->FreeOvZones = z;
856 		crit_exit();
857 	    } else {
858 		crit_exit();
859 		logmemory(free_ovsz, ptr, type, size, 0);
860 		kmem_slab_free(ptr, size);	/* may block */
861 	    }
862 	    logmemory_quick(free_end);
863 	    return;
864 	}
865     }
866 
867     /*
868      * Zone case.  Figure out the zone based on the fact that it is
869      * ZoneSize aligned.
870      */
871     z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
872     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
873 
874     /*
875      * If we do not own the zone then forward the request to the
876      * cpu that does.  Since the timing is non-critical, a passive
877      * message is sent.
878      */
879     if (z->z_CpuGd != gd) {
880 	*(struct malloc_type **)ptr = type;
881 #ifdef SMP
882 	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
883 	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
884 #else
885 	panic("Corrupt SLZone");
886 #endif
887 	logmemory_quick(free_end);
888 	return;
889     }
890 
891     logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);
892 
893     if (type->ks_magic != M_MAGIC)
894 	panic("free: malloc type lacks magic");
895 
896     crit_enter();
897     pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
898     chunk = ptr;
899 
900 #ifdef INVARIANTS
901     /*
902      * Attempt to detect a double-free.  To reduce overhead we only check
903      * if there appears to be a link pointer at the base of the data.
904      */
905     if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
906 	SLChunk *scan;
907 	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
908 	    if (scan == chunk)
909 		panic("Double free at %p", chunk);
910 	}
911     }
912     chunk_mark_free(z, chunk);
913 #endif
914 
915     /*
916      * Put weird data into the memory to detect modifications after freeing,
917      * illegal pointer use after freeing (we should fault on the odd address),
918      * and so forth.  XXX needs more work, see the old malloc code.
919      */
920 #ifdef INVARIANTS
921     if (z->z_ChunkSize < sizeof(weirdary))
922 	bcopy(weirdary, chunk, z->z_ChunkSize);
923     else
924 	bcopy(weirdary, chunk, sizeof(weirdary));
925 #endif
926 
927     /*
928      * Add this free non-zero'd chunk to a linked list for reuse, adjust
929      * z_FirstFreePg.
930      */
931 #ifdef INVARIANTS
932     if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
933 	panic("BADFREE %p", chunk);
934 #endif
935     chunk->c_Next = z->z_PageAry[pgno];
936     z->z_PageAry[pgno] = chunk;
937 #ifdef INVARIANTS
938     if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
939 	panic("BADFREE2");
940 #endif
941     if (z->z_FirstFreePg > pgno)
942 	z->z_FirstFreePg = pgno;
943 
944     /*
945      * Bump the number of free chunks.  If it becomes non-zero the zone
946      * must be added back onto the appropriate list.
947      */
948     if (z->z_NFree++ == 0) {
949 	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
950 	slgd->ZoneAry[z->z_ZoneIndex] = z;
951     }
952 
953     --type->ks_inuse[z->z_Cpu];
954     type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
955 
956     /*
957      * If the zone becomes totally free, and there are other zones we
958      * can allocate from, move this zone to the FreeZones list.  Since
959      * this code can be called from an IPI callback, do *NOT* try to mess
960      * with kernel_map here.  Hysteresis will be performed at malloc() time.
961      */
962     if (z->z_NFree == z->z_NMax &&
963 	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
964     ) {
965 	SLZone **pz;
966 
967 	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
968 	    ;
969 	*pz = z->z_Next;
970 	z->z_Magic = -1;
971 	z->z_Next = slgd->FreeZones;
972 	slgd->FreeZones = z;
973 	++slgd->NFreeZones;
974     }
975     logmemory_quick(free_end);
976     crit_exit();
977 }
978 
979 #if defined(INVARIANTS)
980 /*
981  * Helper routines for sanity checks
982  */
983 static
984 void
985 chunk_mark_allocated(SLZone *z, void *chunk)
986 {
987     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
988     __uint32_t *bitptr;
989 
990     KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
991     bitptr = &z->z_Bitmap[bitdex >> 5];
992     bitdex &= 31;
993     KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
994     *bitptr |= 1 << bitdex;
995 }
996 
997 static
998 void
999 chunk_mark_free(SLZone *z, void *chunk)
1000 {
1001     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1002     __uint32_t *bitptr;
1003 
1004     KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1005     bitptr = &z->z_Bitmap[bitdex >> 5];
1006     bitdex &= 31;
1007     KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
1008     *bitptr &= ~(1 << bitdex);
1009 }
1010 
1011 #endif
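/*
 * Illustrative note: a chunk's bit index is its offset from z_BasePtr
 * divided by the chunk size.  For example bit index 69 lives in
 * z_Bitmap[69 >> 5] == z_Bitmap[2] at bit position (69 & 31) == 5.
 */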
1012 
1013 /*
1014  * kmem_slab_alloc()
1015  *
1016  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1017  *	specified alignment.  M_* flags are expected in the flags field.
1018  *
1019  *	Alignment must be a multiple of PAGE_SIZE.
1020  *
1021  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1022  *	but when we move zalloc() over to use this function as its backend
1023  *	we will have to switch to kreserve/krelease and call reserve(0)
1024  *	after the new space is made available.
1025  *
1026  *	Interrupt code which has preempted other code is not allowed to
1027  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1028  *	non-preemptively or blocks and then runs non-preemptively, then
1029  *	it is free to use PQ_CACHE pages.
1030  *
1031  *	This routine will currently obtain the BGL.
1032  *
1033  * MPALMOSTSAFE - acquires mplock
1034  */
1035 static void *
1036 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1037 {
1038     vm_size_t i;
1039     vm_offset_t addr;
1040     int count, vmflags, base_vmflags;
1041     thread_t td;
1042 
1043     size = round_page(size);
1044     addr = vm_map_min(&kernel_map);
1045 
1046     /*
1047      * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
1048      * cannot block.
1049      */
1050     if (flags & M_RNOWAIT) {
1051 	if (try_mplock() == 0)
1052 	    return(NULL);
1053     } else {
1054 	get_mplock();
1055     }
1056     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1057     crit_enter();
1058     vm_map_lock(&kernel_map);
1059     if (vm_map_findspace(&kernel_map, addr, size, align, &addr)) {
1060 	vm_map_unlock(&kernel_map);
1061 	if ((flags & M_NULLOK) == 0)
1062 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1063 	crit_exit();
1064 	vm_map_entry_release(count);
1065 	rel_mplock();
1066 	return(NULL);
1067     }
1068 
1069     /*
1070      * kernel_object maps 1:1 to kernel_map.
1071      */
1072     vm_object_reference(&kernel_object);
1073     vm_map_insert(&kernel_map, &count,
1074 		    &kernel_object, addr, addr, addr + size,
1075 		    VM_MAPTYPE_NORMAL,
1076 		    VM_PROT_ALL, VM_PROT_ALL,
1077 		    0);
1078 
1079     td = curthread;
1080 
1081     base_vmflags = 0;
1082     if (flags & M_ZERO)
1083         base_vmflags |= VM_ALLOC_ZERO;
1084     if (flags & M_USE_RESERVE)
1085 	base_vmflags |= VM_ALLOC_SYSTEM;
1086     if (flags & M_USE_INTERRUPT_RESERVE)
1087         base_vmflags |= VM_ALLOC_INTERRUPT;
1088     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
1089     	panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);
1090 
1091 
1092     /*
1093      * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
1094      */
1095     for (i = 0; i < size; i += PAGE_SIZE) {
1096 	vm_page_t m;
1097 
1098 	/*
1099 	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
1100 	 *
1101 	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1102 	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1103 	 * implied in this case), though I'm sure if we really need to do
1104 	 * that.
1105 	 */
1106 	vmflags = base_vmflags;
1107 	if (flags & M_WAITOK) {
1108 	    if (td->td_preempted)
1109 		vmflags |= VM_ALLOC_SYSTEM;
1110 	    else
1111 		vmflags |= VM_ALLOC_NORMAL;
1112 	}
1113 
1114 	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1115 
1116 	/*
1117 	 * If the allocation failed we either return NULL or we retry.
1118 	 *
1119 	 * If M_WAITOK is specified we wait for more memory and retry.
1120 	 * If M_WAITOK is specified from a preemption we yield instead of
1121 	 * wait.  Livelock will not occur because the interrupt thread
1122 	 * will not be preempting anyone the second time around after the
1123 	 * yield.
1124 	 */
1125 	if (m == NULL) {
1126 	    if (flags & M_WAITOK) {
1127 		if (td->td_preempted) {
1128 		    vm_map_unlock(&kernel_map);
1129 		    lwkt_yield();
1130 		    vm_map_lock(&kernel_map);
1131 		} else {
1132 		    vm_map_unlock(&kernel_map);
1133 		    vm_wait();
1134 		    vm_map_lock(&kernel_map);
1135 		}
1136 		i -= PAGE_SIZE;	/* retry */
1137 		continue;
1138 	    }
1139 
1140 	    /*
1141 	     * We were unable to recover, cleanup and return NULL
1142 	     */
1143 	    while (i != 0) {
1144 		i -= PAGE_SIZE;
1145 		m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1146 		vm_page_free(m);
1147 	    }
1148 	    vm_map_delete(&kernel_map, addr, addr + size, &count);
1149 	    vm_map_unlock(&kernel_map);
1150 	    crit_exit();
1151 	    vm_map_entry_release(count);
1152 	    rel_mplock();
1153 	    return(NULL);
1154 	}
1155     }
1156 
1157     /*
1158      * Success!
1159      *
1160      * Mark the map entry as non-pageable using a routine that allows us to
1161      * populate the underlying pages.
1162      */
1163     vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1164     crit_exit();
1165 
1166     /*
1167      * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1168      */
1169     for (i = 0; i < size; i += PAGE_SIZE) {
1170 	vm_page_t m;
1171 
1172 	m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1173 	m->valid = VM_PAGE_BITS_ALL;
1174 	vm_page_wire(m);
1175 	vm_page_wakeup(m);
1176 	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
1177 	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1178 	    bzero((char *)addr + i, PAGE_SIZE);
1179 	vm_page_flag_clear(m, PG_ZERO);
1180 	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
1181     }
1182     vm_map_unlock(&kernel_map);
1183     vm_map_entry_release(count);
1184     rel_mplock();
1185     return((void *)addr);
1186 }
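/*
 * Illustrative note: the slab code above uses this backend in two ways,
 * e.g.
 *	z = kmem_slab_alloc(ZoneSize, ZoneSize, flags | M_ZERO);
 * for whole zones, where the ZoneSize alignment lets kfree() recover the
 * zone header by masking a chunk pointer, and with PAGE_SIZE alignment for
 * oversized allocations that bypass the zones.
 */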
1187 
1188 /*
1189  * kmem_slab_free()
1190  *
1191  * MPALMOSTSAFE - acquires mplock
1192  */
1193 static void
1194 kmem_slab_free(void *ptr, vm_size_t size)
1195 {
1196     get_mplock();
1197     crit_enter();
1198     vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1199     crit_exit();
1200     rel_mplock();
1201 }
1202 
1203