xref: /dragonfly/sys/kern/kern_slaballoc.c (revision 1bf4b486)
1 /*
2  * KERN_SLABALLOC.C	- Kernel SLAB memory allocator (MP SAFE)
3  *
4  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.35 2005/06/20 23:21:34 dillon Exp $
37  *
38  * This module implements a slab allocator drop-in replacement for the
39  * kernel malloc().
40  *
41  * A slab allocator reserves a ZONE for each chunk size, then lays the
42  * chunks out in an array within the zone.  Allocation and deallocation
43  * is nearly instantanious, and fragmentation/overhead losses are limited
44  * to a fixed worst-case amount.
45  *
46  * The downside of this slab implementation is in the chunk size
47  * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
48  * In a kernel implementation all this memory will be physical so
49  * the zone size is adjusted downward on machines with less physical
50  * memory.  The upside is that overhead is bounded... this is the *worst*
51  * case overhead.
52  *
53  * Slab management is done on a per-cpu basis and no locking or mutexes
54  * are required, only a critical section.  When one cpu frees memory
55  * belonging to another cpu's slab manager an asynchronous IPI message
56  * will be queued to execute the operation.   In addition, both the
57  * high level slab allocator and the low level zone allocator optimize
58  * M_ZERO requests, and the slab allocator does not have to pre initialize
59  * the linked list of chunks.
60  *
61  * XXX Balancing is needed between cpus.  Balance will be handled through
62  * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63  *
64  * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65  * the new zone should be restricted to M_USE_RESERVE requests only.
66  *
67  *	Alloc Size	Chunking        Number of zones
68  *	0-127		8		16
69  *	128-255		16		8
70  *	256-511		32		8
71  *	512-1023	64		8
72  *	1024-2047	128		8
73  *	2048-4095	256		8
74  *	4096-8191	512		8
75  *	8192-16383	1024		8
76  *	16384-32767	2048		8
77  *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
78  *
79  *	Allocations >= ZoneLimit go directly to kmem.
80  *
81  *			API REQUIREMENTS AND SIDE EFFECTS
82  *
83  *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
84  *    have remained compatible with the following API requirements:
85  *
86  *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
87  *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
88  *    + malloc(0) is allowed and returns non-NULL (ahc driver)
89  *    + ability to allocate arbitrarily large chunks of memory
90  */
91 
92 #include "opt_vm.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kernel.h>
97 #include <sys/slaballoc.h>
98 #include <sys/mbuf.h>
99 #include <sys/vmmeter.h>
100 #include <sys/lock.h>
101 #include <sys/thread.h>
102 #include <sys/globaldata.h>
103 #include <sys/sysctl.h>
104 #include <sys/ktr.h>
105 
106 #include <vm/vm.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_kern.h>
109 #include <vm/vm_extern.h>
110 #include <vm/vm_object.h>
111 #include <vm/pmap.h>
112 #include <vm/vm_map.h>
113 #include <vm/vm_page.h>
114 #include <vm/vm_pageout.h>
115 
116 #include <machine/cpu.h>
117 
118 #include <sys/thread2.h>
119 
120 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
121 
122 #define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
123 #define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + 	\
124 			sizeof(int))
125 
126 #if !defined(KTR_MEMORY)
127 #define KTR_MEMORY	KTR_ALL
128 #endif
129 KTR_INFO_MASTER(memory);
130 KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
131 KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
132 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
133 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
134 KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
135 #ifdef SMP
136 KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
137 KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
138 #endif
139 
140 #define logmemory(name, ptr, type, size, flags)				\
141 	KTR_LOG(memory_ ## name, ptr, type, size, flags)
142 
143 /*
144  * Fixed globals (not per-cpu)
145  */
146 static int ZoneSize;
147 static int ZoneLimit;
148 static int ZonePageCount;
149 static int ZoneMask;
150 static struct malloc_type *kmemstatistics;
151 static struct kmemusage *kmemusage;
152 static int32_t weirdary[16];
153 
154 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
155 static void kmem_slab_free(void *ptr, vm_size_t bytes);
156 #if defined(INVARIANTS)
157 static void chunk_mark_allocated(SLZone *z, void *chunk);
158 static void chunk_mark_free(SLZone *z, void *chunk);
159 #endif
160 
161 /*
162  * Misc constants.  Note that allocations that are exact multiples of
163  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
164  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
165  */
166 #define MIN_CHUNK_SIZE		8		/* in bytes */
167 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
168 #define ZONE_RELS_THRESH	2		/* threshold number of zones */
169 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
170 
171 /*
172  * The WEIRD_ADDR is used as known text to copy into free objects to
173  * try to create deterministic failure cases if the data is accessed after
174  * free.
175  */
176 #define WEIRD_ADDR      0xdeadc0de
177 #define MAX_COPY        sizeof(weirdary)
178 #define ZERO_LENGTH_PTR	((void *)-8)
179 
180 /*
181  * Misc global malloc buckets
182  */
183 
184 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
185 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
186 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
187 
188 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
189 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
190 
191 /*
192  * Initialize the slab memory allocator.  We have to choose a zone size based
193  * on available physical memory.  We choose a zone side which is approximately
194  * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
195  * 128K.  The zone size is limited to the bounds set in slaballoc.h
196  * (typically 32K min, 128K max).
197  */
198 static void kmeminit(void *dummy);
199 
200 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
201 
202 #ifdef INVARIANTS
203 /*
204  * If enabled any memory allocated without M_ZERO is initialized to -1.
205  */
206 static int  use_malloc_pattern;
207 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
208 		&use_malloc_pattern, 0, "");
209 #endif
210 
211 static void
212 kmeminit(void *dummy)
213 {
214     vm_poff_t limsize;
215     int usesize;
216     int i;
217     vm_pindex_t npg;
218 
219     limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
220     if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
221 	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
222 
223     usesize = (int)(limsize / 1024);	/* convert to KB */
224 
225     ZoneSize = ZALLOC_MIN_ZONE_SIZE;
226     while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
227 	ZoneSize <<= 1;
228     ZoneLimit = ZoneSize / 4;
229     if (ZoneLimit > ZALLOC_ZONE_LIMIT)
230 	ZoneLimit = ZALLOC_ZONE_LIMIT;
231     ZoneMask = ZoneSize - 1;
232     ZonePageCount = ZoneSize / PAGE_SIZE;
233 
234     npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
235     kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);
236 
237     for (i = 0; i < arysize(weirdary); ++i)
238 	weirdary[i] = WEIRD_ADDR;
239 
240     if (bootverbose)
241 	printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
242 }
243 
244 /*
245  * Initialize a malloc type tracking structure.
246  */
247 void
248 malloc_init(void *data)
249 {
250     struct malloc_type *type = data;
251     vm_poff_t limsize;
252 
253     if (type->ks_magic != M_MAGIC)
254 	panic("malloc type lacks magic");
255 
256     if (type->ks_limit != 0)
257 	return;
258 
259     if (vmstats.v_page_count == 0)
260 	panic("malloc_init not allowed before vm init");
261 
262     limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
263     if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
264 	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
265     type->ks_limit = limsize / 10;
266 
267     type->ks_next = kmemstatistics;
268     kmemstatistics = type;
269 }
270 
271 void
272 malloc_uninit(void *data)
273 {
274     struct malloc_type *type = data;
275     struct malloc_type *t;
276 #ifdef INVARIANTS
277     int i;
278     long ttl;
279 #endif
280 
281     if (type->ks_magic != M_MAGIC)
282 	panic("malloc type lacks magic");
283 
284     if (vmstats.v_page_count == 0)
285 	panic("malloc_uninit not allowed before vm init");
286 
287     if (type->ks_limit == 0)
288 	panic("malloc_uninit on uninitialized type");
289 
290 #ifdef INVARIANTS
291     /*
292      * memuse is only correct in aggregation.  Due to memory being allocated
293      * on one cpu and freed on another individual array entries may be
294      * negative or positive (canceling each other out).
295      */
296     for (i = ttl = 0; i < ncpus; ++i)
297 	ttl += type->ks_memuse[i];
298     if (ttl) {
299 	printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
300 	    ttl, type->ks_shortdesc, i);
301     }
302 #endif
303     if (type == kmemstatistics) {
304 	kmemstatistics = type->ks_next;
305     } else {
306 	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
307 	    if (t->ks_next == type) {
308 		t->ks_next = type->ks_next;
309 		break;
310 	    }
311 	}
312     }
313     type->ks_next = NULL;
314     type->ks_limit = 0;
315 }
316 
317 /*
318  * Calculate the zone index for the allocation request size and set the
319  * allocation request size to that particular zone's chunk size.
320  */
321 static __inline int
322 zoneindex(unsigned long *bytes)
323 {
324     unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
325     if (n < 128) {
326 	*bytes = n = (n + 7) & ~7;
327 	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
328     }
329     if (n < 256) {
330 	*bytes = n = (n + 15) & ~15;
331 	return(n / 16 + 7);
332     }
333     if (n < 8192) {
334 	if (n < 512) {
335 	    *bytes = n = (n + 31) & ~31;
336 	    return(n / 32 + 15);
337 	}
338 	if (n < 1024) {
339 	    *bytes = n = (n + 63) & ~63;
340 	    return(n / 64 + 23);
341 	}
342 	if (n < 2048) {
343 	    *bytes = n = (n + 127) & ~127;
344 	    return(n / 128 + 31);
345 	}
346 	if (n < 4096) {
347 	    *bytes = n = (n + 255) & ~255;
348 	    return(n / 256 + 39);
349 	}
350 	*bytes = n = (n + 511) & ~511;
351 	return(n / 512 + 47);
352     }
353 #if ZALLOC_ZONE_LIMIT > 8192
354     if (n < 16384) {
355 	*bytes = n = (n + 1023) & ~1023;
356 	return(n / 1024 + 55);
357     }
358 #endif
359 #if ZALLOC_ZONE_LIMIT > 16384
360     if (n < 32768) {
361 	*bytes = n = (n + 2047) & ~2047;
362 	return(n / 2048 + 63);
363     }
364 #endif
365     panic("Unexpected byte count %d", n);
366     return(0);
367 }
368 
369 /*
370  * malloc()	(SLAB ALLOCATOR) (MP SAFE)
371  *
372  *	Allocate memory via the slab allocator.  If the request is too large,
373  *	or if it page-aligned beyond a certain size, we fall back to the
374  *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
375  *	&SlabMisc if you don't care.
376  *
377  *	M_RNOWAIT	- don't block.
378  *	M_NULLOK	- return NULL instead of blocking.
379  *	M_ZERO		- zero the returned memory.
380  *	M_USE_RESERVE	- allow greater drawdown of the free list
381  *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
382  */
383 void *
384 malloc(unsigned long size, struct malloc_type *type, int flags)
385 {
386     SLZone *z;
387     SLChunk *chunk;
388     SLGlobalData *slgd;
389     struct globaldata *gd;
390     int zi;
391 #ifdef INVARIANTS
392     int i;
393 #endif
394 
395     gd = mycpu;
396     slgd = &gd->gd_slab;
397 
398     /*
399      * XXX silly to have this in the critical path.
400      */
401     if (type->ks_limit == 0) {
402 	crit_enter();
403 	if (type->ks_limit == 0)
404 	    malloc_init(type);
405 	crit_exit();
406     }
407     ++type->ks_calls;
408 
409     /*
410      * Handle the case where the limit is reached.  Panic if we can't return
411      * NULL.  The original malloc code looped, but this tended to
412      * simply deadlock the computer.
413      *
414      * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
415      * to determine if a more complete limit check should be done.  The
416      * actual memory use is tracked via ks_memuse[cpu].
417      */
418     while (type->ks_loosememuse >= type->ks_limit) {
419 	int i;
420 	long ttl;
421 
422 	for (i = ttl = 0; i < ncpus; ++i)
423 	    ttl += type->ks_memuse[i];
424 	type->ks_loosememuse = ttl;	/* not MP synchronized */
425 	if (ttl >= type->ks_limit) {
426 	    if (flags & M_NULLOK) {
427 		logmemory(malloc, NULL, type, size, flags);
428 		return(NULL);
429 	    }
430 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
431 	}
432     }
433 
434     /*
435      * Handle the degenerate size == 0 case.  Yes, this does happen.
436      * Return a special pointer.  This is to maintain compatibility with
437      * the original malloc implementation.  Certain devices, such as the
438      * adaptec driver, not only allocate 0 bytes, they check for NULL and
439      * also realloc() later on.  Joy.
440      */
441     if (size == 0) {
442 	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
443 	return(ZERO_LENGTH_PTR);
444     }
445 
446     /*
447      * Handle hysteresis from prior frees here in malloc().  We cannot
448      * safely manipulate the kernel_map in free() due to free() possibly
449      * being called via an IPI message or from sensitive interrupt code.
450      */
451     while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
452 	crit_enter();
453 	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
454 	    z = slgd->FreeZones;
455 	    slgd->FreeZones = z->z_Next;
456 	    --slgd->NFreeZones;
457 	    kmem_slab_free(z, ZoneSize);	/* may block */
458 	}
459 	crit_exit();
460     }
461     /*
462      * XXX handle oversized frees that were queued from free().
463      */
464     while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
465 	crit_enter();
466 	if ((z = slgd->FreeOvZones) != NULL) {
467 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
468 	    slgd->FreeOvZones = z->z_Next;
469 	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
470 	}
471 	crit_exit();
472     }
473 
474     /*
475      * Handle large allocations directly.  There should not be very many of
476      * these so performance is not a big issue.
477      *
478      * Guarentee page alignment for allocations in multiples of PAGE_SIZE
479      */
480     if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
481 	struct kmemusage *kup;
482 
483 	size = round_page(size);
484 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
485 	if (chunk == NULL) {
486 	    logmemory(malloc, NULL, type, size, flags);
487 	    return(NULL);
488 	}
489 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
490 	flags |= M_PASSIVE_ZERO;
491 	kup = btokup(chunk);
492 	kup->ku_pagecnt = size / PAGE_SIZE;
493 	kup->ku_cpu = gd->gd_cpuid;
494 	crit_enter();
495 	goto done;
496     }
497 
498     /*
499      * Attempt to allocate out of an existing zone.  First try the free list,
500      * then allocate out of unallocated space.  If we find a good zone move
501      * it to the head of the list so later allocations find it quickly
502      * (we might have thousands of zones in the list).
503      *
504      * Note: zoneindex() will panic of size is too large.
505      */
506     zi = zoneindex(&size);
507     KKASSERT(zi < NZONES);
508     crit_enter();
509     if ((z = slgd->ZoneAry[zi]) != NULL) {
510 	KKASSERT(z->z_NFree > 0);
511 
512 	/*
513 	 * Remove us from the ZoneAry[] when we become empty
514 	 */
515 	if (--z->z_NFree == 0) {
516 	    slgd->ZoneAry[zi] = z->z_Next;
517 	    z->z_Next = NULL;
518 	}
519 
520 	/*
521 	 * Locate a chunk in a free page.  This attempts to localize
522 	 * reallocations into earlier pages without us having to sort
523 	 * the chunk list.  A chunk may still overlap a page boundary.
524 	 */
525 	while (z->z_FirstFreePg < ZonePageCount) {
526 	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
527 #ifdef DIAGNOSTIC
528 		/*
529 		 * Diagnostic: c_Next is not total garbage.
530 		 */
531 		KKASSERT(chunk->c_Next == NULL ||
532 			((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
533 			((intptr_t)chunk & IN_SAME_PAGE_MASK));
534 #endif
535 #ifdef INVARIANTS
536 		if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
537 			panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
538 		if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
539 			panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
540 		chunk_mark_allocated(z, chunk);
541 #endif
542 		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
543 		goto done;
544 	    }
545 	    ++z->z_FirstFreePg;
546 	}
547 
548 	/*
549 	 * No chunks are available but NFree said we had some memory, so
550 	 * it must be available in the never-before-used-memory area
551 	 * governed by UIndex.  The consequences are very serious if our zone
552 	 * got corrupted so we use an explicit panic rather then a KASSERT.
553 	 */
554 	if (z->z_UIndex + 1 != z->z_NMax)
555 	    z->z_UIndex = z->z_UIndex + 1;
556 	else
557 	    z->z_UIndex = 0;
558 	if (z->z_UIndex == z->z_UEndIndex)
559 	    panic("slaballoc: corrupted zone");
560 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
561 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
562 	    flags &= ~M_ZERO;
563 	    flags |= M_PASSIVE_ZERO;
564 	}
565 #if defined(INVARIANTS)
566 	chunk_mark_allocated(z, chunk);
567 #endif
568 	goto done;
569     }
570 
571     /*
572      * If all zones are exhausted we need to allocate a new zone for this
573      * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
574      * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
575      * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
576      * we do not pre-zero it because we do not want to mess up the L1 cache.
577      *
578      * At least one subsystem, the tty code (see CROUND) expects power-of-2
579      * allocations to be power-of-2 aligned.  We maintain compatibility by
580      * adjusting the base offset below.
581      */
582     {
583 	int off;
584 
585 	if ((z = slgd->FreeZones) != NULL) {
586 	    slgd->FreeZones = z->z_Next;
587 	    --slgd->NFreeZones;
588 	    bzero(z, sizeof(SLZone));
589 	    z->z_Flags |= SLZF_UNOTZEROD;
590 	} else {
591 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
592 	    if (z == NULL)
593 		goto fail;
594 	}
595 
596 	/*
597 	 * How big is the base structure?
598 	 */
599 #if defined(INVARIANTS)
600 	/*
601 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
602 	 * complicated so don't make an exact calculation.
603 	 */
604 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
605 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
606 #else
607 	off = sizeof(SLZone);
608 #endif
609 
610 	/*
611 	 * Guarentee power-of-2 alignment for power-of-2-sized chunks.
612 	 * Otherwise just 8-byte align the data.
613 	 */
614 	if ((size | (size - 1)) + 1 == (size << 1))
615 	    off = (off + size - 1) & ~(size - 1);
616 	else
617 	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
618 	z->z_Magic = ZALLOC_SLAB_MAGIC;
619 	z->z_ZoneIndex = zi;
620 	z->z_NMax = (ZoneSize - off) / size;
621 	z->z_NFree = z->z_NMax - 1;
622 	z->z_BasePtr = (char *)z + off;
623 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
624 	z->z_ChunkSize = size;
625 	z->z_FirstFreePg = ZonePageCount;
626 	z->z_CpuGd = gd;
627 	z->z_Cpu = gd->gd_cpuid;
628 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
629 	z->z_Next = slgd->ZoneAry[zi];
630 	slgd->ZoneAry[zi] = z;
631 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
632 	    flags &= ~M_ZERO;	/* already zero'd */
633 	    flags |= M_PASSIVE_ZERO;
634 	}
635 #if defined(INVARIANTS)
636 	chunk_mark_allocated(z, chunk);
637 #endif
638 
639 	/*
640 	 * Slide the base index for initial allocations out of the next
641 	 * zone we create so we do not over-weight the lower part of the
642 	 * cpu memory caches.
643 	 */
644 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
645 				& (ZALLOC_MAX_ZONE_SIZE - 1);
646     }
647 done:
648     ++type->ks_inuse[gd->gd_cpuid];
649     type->ks_memuse[gd->gd_cpuid] += size;
650     type->ks_loosememuse += size;	/* not MP synchronized */
651     crit_exit();
652     if (flags & M_ZERO)
653 	bzero(chunk, size);
654 #ifdef INVARIANTS
655     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
656 	if (use_malloc_pattern) {
657 	    for (i = 0; i < size; i += sizeof(int)) {
658 		*(int *)((char *)chunk + i) = -1;
659 	    }
660 	}
661 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
662     }
663 #endif
664     logmemory(malloc, chunk, type, size, flags);
665     return(chunk);
666 fail:
667     crit_exit();
668     logmemory(malloc, NULL, type, size, flags);
669     return(NULL);
670 }
671 
672 /*
673  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
674  *
675  * Generally speaking this routine is not called very often and we do
676  * not attempt to optimize it beyond reusing the same pointer if the
677  * new size fits within the chunking of the old pointer's zone.
678  */
679 void *
680 realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
681 {
682     SLZone *z;
683     void *nptr;
684     unsigned long osize;
685 
686     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
687 
688     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
689 	return(malloc(size, type, flags));
690     if (size == 0) {
691 	free(ptr, type);
692 	return(NULL);
693     }
694 
695     /*
696      * Handle oversized allocations.  XXX we really should require that a
697      * size be passed to free() instead of this nonsense.
698      */
699     {
700 	struct kmemusage *kup;
701 
702 	kup = btokup(ptr);
703 	if (kup->ku_pagecnt) {
704 	    osize = kup->ku_pagecnt << PAGE_SHIFT;
705 	    if (osize == round_page(size))
706 		return(ptr);
707 	    if ((nptr = malloc(size, type, flags)) == NULL)
708 		return(NULL);
709 	    bcopy(ptr, nptr, min(size, osize));
710 	    free(ptr, type);
711 	    return(nptr);
712 	}
713     }
714 
715     /*
716      * Get the original allocation's zone.  If the new request winds up
717      * using the same chunk size we do not have to do anything.
718      */
719     z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
720     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
721 
722     zoneindex(&size);
723     if (z->z_ChunkSize == size)
724 	return(ptr);
725 
726     /*
727      * Allocate memory for the new request size.  Note that zoneindex has
728      * already adjusted the request size to the appropriate chunk size, which
729      * should optimize our bcopy().  Then copy and return the new pointer.
730      */
731     if ((nptr = malloc(size, type, flags)) == NULL)
732 	return(NULL);
733     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
734     free(ptr, type);
735     return(nptr);
736 }
737 
738 /*
739  * Allocate a copy of the specified string.
740  *
741  * (MP SAFE) (MAY BLOCK)
742  */
743 char *
744 strdup(const char *str, struct malloc_type *type)
745 {
746     int zlen;	/* length inclusive of terminating NUL */
747     char *nstr;
748 
749     if (str == NULL)
750 	return(NULL);
751     zlen = strlen(str) + 1;
752     nstr = malloc(zlen, type, M_WAITOK);
753     bcopy(str, nstr, zlen);
754     return(nstr);
755 }
756 
757 #ifdef SMP
758 /*
759  * free()	(SLAB ALLOCATOR)
760  *
761  *	Free the specified chunk of memory.
762  */
763 static
764 void
765 free_remote(void *ptr)
766 {
767     logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
768     free(ptr, *(struct malloc_type **)ptr);
769 }
770 
771 #endif
772 
773 /*
774  * free (SLAB ALLOCATOR) (MP SAFE)
775  *
776  * Free a memory block previously allocated by malloc.  Note that we do not
777  * attempt to uplodate ks_loosememuse as MP races could prevent us from
778  * checking memory limits in malloc.
779  */
780 void
781 free(void *ptr, struct malloc_type *type)
782 {
783     SLZone *z;
784     SLChunk *chunk;
785     SLGlobalData *slgd;
786     struct globaldata *gd;
787     int pgno;
788 
789     gd = mycpu;
790     slgd = &gd->gd_slab;
791 
792     if (ptr == NULL)
793 	panic("trying to free NULL pointer");
794 
795     /*
796      * Handle special 0-byte allocations
797      */
798     if (ptr == ZERO_LENGTH_PTR) {
799 	logmemory(free_zero, ptr, type, -1, 0);
800 	return;
801     }
802 
803     /*
804      * Handle oversized allocations.  XXX we really should require that a
805      * size be passed to free() instead of this nonsense.
806      *
807      * This code is never called via an ipi.
808      */
809     {
810 	struct kmemusage *kup;
811 	unsigned long size;
812 
813 	kup = btokup(ptr);
814 	if (kup->ku_pagecnt) {
815 	    size = kup->ku_pagecnt << PAGE_SHIFT;
816 	    kup->ku_pagecnt = 0;
817 #ifdef INVARIANTS
818 	    KKASSERT(sizeof(weirdary) <= size);
819 	    bcopy(weirdary, ptr, sizeof(weirdary));
820 #endif
821 	    /*
822 	     * note: we always adjust our cpu's slot, not the originating
823 	     * cpu (kup->ku_cpuid).  The statistics are in aggregate.
824 	     *
825 	     * note: XXX we have still inherited the interrupts-can't-block
826 	     * assumption.  An interrupt thread does not bump
827 	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
828 	     * primarily until we can fix softupdate's assumptions about free().
829 	     */
830 	    crit_enter();
831 	    --type->ks_inuse[gd->gd_cpuid];
832 	    type->ks_memuse[gd->gd_cpuid] -= size;
833 	    if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
834 		logmemory(free_ovsz_delayed, ptr, type, size, 0);
835 		z = (SLZone *)ptr;
836 		z->z_Magic = ZALLOC_OVSZ_MAGIC;
837 		z->z_Next = slgd->FreeOvZones;
838 		z->z_ChunkSize = size;
839 		slgd->FreeOvZones = z;
840 		crit_exit();
841 	    } else {
842 		crit_exit();
843 		logmemory(free_ovsz, ptr, type, size, 0);
844 		kmem_slab_free(ptr, size);	/* may block */
845 	    }
846 	    return;
847 	}
848     }
849 
850     /*
851      * Zone case.  Figure out the zone based on the fact that it is
852      * ZoneSize aligned.
853      */
854     z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
855     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
856 
857     /*
858      * If we do not own the zone then forward the request to the
859      * cpu that does.  Since the timing is non-critical, a passive
860      * message is sent.
861      */
862     if (z->z_CpuGd != gd) {
863 	*(struct malloc_type **)ptr = type;
864 #ifdef SMP
865 	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
866 	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
867 #else
868 	panic("Corrupt SLZone");
869 #endif
870 	return;
871     }
872 
873     logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);
874 
875     if (type->ks_magic != M_MAGIC)
876 	panic("free: malloc type lacks magic");
877 
878     crit_enter();
879     pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
880     chunk = ptr;
881 
882 #ifdef INVARIANTS
883     /*
884      * Attempt to detect a double-free.  To reduce overhead we only check
885      * if there appears to be link pointer at the base of the data.
886      */
887     if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
888 	SLChunk *scan;
889 	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
890 	    if (scan == chunk)
891 		panic("Double free at %p", chunk);
892 	}
893     }
894     chunk_mark_free(z, chunk);
895 #endif
896 
897     /*
898      * Put weird data into the memory to detect modifications after freeing,
899      * illegal pointer use after freeing (we should fault on the odd address),
900      * and so forth.  XXX needs more work, see the old malloc code.
901      */
902 #ifdef INVARIANTS
903     if (z->z_ChunkSize < sizeof(weirdary))
904 	bcopy(weirdary, chunk, z->z_ChunkSize);
905     else
906 	bcopy(weirdary, chunk, sizeof(weirdary));
907 #endif
908 
909     /*
910      * Add this free non-zero'd chunk to a linked list for reuse, adjust
911      * z_FirstFreePg.
912      */
913 #ifdef INVARIANTS
914     if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
915 	panic("BADFREE %p", chunk);
916 #endif
917     chunk->c_Next = z->z_PageAry[pgno];
918     z->z_PageAry[pgno] = chunk;
919 #ifdef INVARIANTS
920     if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
921 	panic("BADFREE2");
922 #endif
923     if (z->z_FirstFreePg > pgno)
924 	z->z_FirstFreePg = pgno;
925 
926     /*
927      * Bump the number of free chunks.  If it becomes non-zero the zone
928      * must be added back onto the appropriate list.
929      */
930     if (z->z_NFree++ == 0) {
931 	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
932 	slgd->ZoneAry[z->z_ZoneIndex] = z;
933     }
934 
935     --type->ks_inuse[z->z_Cpu];
936     type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
937 
938     /*
939      * If the zone becomes totally free, and there are other zones we
940      * can allocate from, move this zone to the FreeZones list.  Since
941      * this code can be called from an IPI callback, do *NOT* try to mess
942      * with kernel_map here.  Hysteresis will be performed at malloc() time.
943      */
944     if (z->z_NFree == z->z_NMax &&
945 	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
946     ) {
947 	SLZone **pz;
948 
949 	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
950 	    ;
951 	*pz = z->z_Next;
952 	z->z_Magic = -1;
953 	z->z_Next = slgd->FreeZones;
954 	slgd->FreeZones = z;
955 	++slgd->NFreeZones;
956     }
957     crit_exit();
958 }
959 
960 #if defined(INVARIANTS)
961 /*
962  * Helper routines for sanity checks
963  */
964 static
965 void
966 chunk_mark_allocated(SLZone *z, void *chunk)
967 {
968     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
969     __uint32_t *bitptr;
970 
971     KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
972     bitptr = &z->z_Bitmap[bitdex >> 5];
973     bitdex &= 31;
974     KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
975     *bitptr |= 1 << bitdex;
976 }
977 
978 static
979 void
980 chunk_mark_free(SLZone *z, void *chunk)
981 {
982     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
983     __uint32_t *bitptr;
984 
985     KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
986     bitptr = &z->z_Bitmap[bitdex >> 5];
987     bitdex &= 31;
988     KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
989     *bitptr &= ~(1 << bitdex);
990 }
991 
992 #endif
993 
994 /*
995  * kmem_slab_alloc()	(MP SAFE) (GETS BGL)
996  *
997  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
998  *	specified alignment.  M_* flags are expected in the flags field.
999  *
1000  *	Alignment must be a multiple of PAGE_SIZE.
1001  *
1002  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1003  *	but when we move zalloc() over to use this function as its backend
1004  *	we will have to switch to kreserve/krelease and call reserve(0)
1005  *	after the new space is made available.
1006  *
1007  *	Interrupt code which has preempted other code is not allowed to
1008  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1009  *	non-preemptively or blocks and then runs non-preemptively, then
1010  *	it is free to use PQ_CACHE pages.
1011  *
1012  *	This routine will currently obtain the BGL.
1013  */
1014 static void *
1015 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1016 {
1017     vm_size_t i;
1018     vm_offset_t addr;
1019     vm_offset_t offset;
1020     int count, vmflags, base_vmflags;
1021     thread_t td;
1022     vm_map_t map = kernel_map;
1023 
1024     size = round_page(size);
1025     addr = vm_map_min(map);
1026 
1027     /*
1028      * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
1029      * cannot block.
1030      */
1031     if (flags & M_RNOWAIT) {
1032 	if (try_mplock() == 0)
1033 	    return(NULL);
1034     } else {
1035 	get_mplock();
1036     }
1037     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1038     crit_enter();
1039     vm_map_lock(map);
1040     if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
1041 	vm_map_unlock(map);
1042 	if ((flags & M_NULLOK) == 0)
1043 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1044 	crit_exit();
1045 	vm_map_entry_release(count);
1046 	rel_mplock();
1047 	return(NULL);
1048     }
1049     offset = addr - VM_MIN_KERNEL_ADDRESS;
1050     vm_object_reference(kernel_object);
1051     vm_map_insert(map, &count,
1052 		    kernel_object, offset, addr, addr + size,
1053 		    VM_PROT_ALL, VM_PROT_ALL, 0);
1054 
1055     td = curthread;
1056 
1057     base_vmflags = 0;
1058     if (flags & M_ZERO)
1059         base_vmflags |= VM_ALLOC_ZERO;
1060     if (flags & M_USE_RESERVE)
1061 	base_vmflags |= VM_ALLOC_SYSTEM;
1062     if (flags & M_USE_INTERRUPT_RESERVE)
1063         base_vmflags |= VM_ALLOC_INTERRUPT;
1064     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
1065     	panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);
1066 
1067 
1068     /*
1069      * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
1070      */
1071     for (i = 0; i < size; i += PAGE_SIZE) {
1072 	vm_page_t m;
1073 	vm_pindex_t idx = OFF_TO_IDX(offset + i);
1074 
1075 	/*
1076 	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
1077 	 *
1078 	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1079 	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1080 	 * implied in this case), though I'm sure if we really need to do
1081 	 * that.
1082 	 */
1083 	vmflags = base_vmflags;
1084 	if (flags & M_WAITOK) {
1085 	    if (td->td_preempted)
1086 		vmflags |= VM_ALLOC_SYSTEM;
1087 	    else
1088 		vmflags |= VM_ALLOC_NORMAL;
1089 	}
1090 
1091 	m = vm_page_alloc(kernel_object, idx, vmflags);
1092 
1093 	/*
1094 	 * If the allocation failed we either return NULL or we retry.
1095 	 *
1096 	 * If M_WAITOK is specified we wait for more memory and retry.
1097 	 * If M_WAITOK is specified from a preemption we yield instead of
1098 	 * wait.  Livelock will not occur because the interrupt thread
1099 	 * will not be preempting anyone the second time around after the
1100 	 * yield.
1101 	 */
1102 	if (m == NULL) {
1103 	    if (flags & M_WAITOK) {
1104 		if (td->td_preempted) {
1105 		    vm_map_unlock(map);
1106 		    lwkt_yield();
1107 		    vm_map_lock(map);
1108 		} else {
1109 		    vm_map_unlock(map);
1110 		    vm_wait();
1111 		    vm_map_lock(map);
1112 		}
1113 		i -= PAGE_SIZE;	/* retry */
1114 		continue;
1115 	    }
1116 
1117 	    /*
1118 	     * We were unable to recover, cleanup and return NULL
1119 	     */
1120 	    while (i != 0) {
1121 		i -= PAGE_SIZE;
1122 		m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
1123 		vm_page_free(m);
1124 	    }
1125 	    vm_map_delete(map, addr, addr + size, &count);
1126 	    vm_map_unlock(map);
1127 	    crit_exit();
1128 	    vm_map_entry_release(count);
1129 	    rel_mplock();
1130 	    return(NULL);
1131 	}
1132     }
1133 
1134     /*
1135      * Success!
1136      *
1137      * Mark the map entry as non-pageable using a routine that allows us to
1138      * populate the underlying pages.
1139      */
1140     vm_map_set_wired_quick(map, addr, size, &count);
1141     crit_exit();
1142 
1143     /*
1144      * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1145      */
1146     for (i = 0; i < size; i += PAGE_SIZE) {
1147 	vm_page_t m;
1148 
1149 	m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
1150 	m->valid = VM_PAGE_BITS_ALL;
1151 	vm_page_wire(m);
1152 	vm_page_wakeup(m);
1153 	pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
1154 	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1155 	    bzero((char *)addr + i, PAGE_SIZE);
1156 	vm_page_flag_clear(m, PG_ZERO);
1157 	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
1158     }
1159     vm_map_unlock(map);
1160     vm_map_entry_release(count);
1161     rel_mplock();
1162     return((void *)addr);
1163 }
1164 
1165 /*
1166  * kmem_slab_free()	(MP SAFE) (GETS BGL)
1167  */
1168 static void
1169 kmem_slab_free(void *ptr, vm_size_t size)
1170 {
1171     get_mplock();
1172     crit_enter();
1173     vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1174     crit_exit();
1175     rel_mplock();
1176 }
1177 
1178