/*
 * KERN_SLABALLOC.H	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SLABALLOC_H_
#define _SYS_SLABALLOC_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_STDINT_H_
#include <sys/stdint.h>
#endif
#ifndef _SYS__MALLOC_H_
#include <sys/_malloc.h>
#endif

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_OVSZ_MAGIC	0x736c6163	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20


#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif
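
/*
 * Illustrative sketch (compiled out): a minimal predicate matching the note
 * above, where sizes that are exact multiples of PAGE_SIZE or that reach
 * ZALLOC_ZONE_LIMIT bypass the slab zones and fall through to the kmem
 * subsystem.  PAGE_SIZE is assumed to come from <sys/param.h>, and the name
 * example_is_slab_managed() is hypothetical; the real size test lives in the
 * allocator proper and may differ in detail.
 */
#if 0
static __inline int
example_is_slab_managed(size_t size)	/* hypothetical helper */
{
    if (size >= ZALLOC_ZONE_LIMIT)
	return (0);		/* too large for any slab zone, use kmem */
    if (size != 0 && (size & (PAGE_SIZE - 1)) == 0)
	return (0);		/* exact multiple of PAGE_SIZE, use kmem */
    return (1);			/* small odd-sized alloc, slab-managed */
}
#endif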

/*
 * Chunk structure for free elements
 */
typedef struct SLChunk {
    struct SLChunk *c_Next;
} SLChunk;

#if defined(SLAB_DEBUG)
/*
 * Only used for kernels compiled w/SLAB_DEBUG
 */
struct ZSources {
    const char *file;
    int line;
};

#endif

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 *
 * NOTE! All fields are cpu-local except z_RChunks.  Remote cpus free
 *	 chunks using atomic ops to z_RChunks and then signal local
 *	 cpus as necessary.
 */
typedef struct SLZone {
    __int32_t	z_Magic;	/* magic number for sanity check */
    int		z_Cpu;		/* which cpu owns this zone? */
    struct globaldata *z_CpuGd;	/* which cpu owns this zone? */
    TAILQ_ENTRY(SLZone) z_Entry;/* ZoneAry[] if z_NFree!=0, else Free*Zones */
    void	*z_UNused01;
    int		z_NFree;	/* total free chunks / ualloc space in zone */
    int		z_NMax;		/* maximum free chunks */
    char	*z_BasePtr;	/* pointer to start of chunk array */
    int		z_UIndex;	/* current initial allocation index */
    int		z_UEndIndex;	/* last (first) allocation index */
    int		z_ChunkSize;	/* chunk size for validation */
    int		z_ZoneIndex;	/* zone size-class index (ZoneAry[] index) */
    int		z_Flags;	/* SLZF_* flags */
    SLChunk	*z_LChunks;	/* linked list of chunks current cpu */
    SLChunk	**z_LChunksp;	/* tailp */
    SLChunk	*z_RChunks;	/* linked list of chunks remote cpu */
    int		z_RSignal;	/* signal interlock */
    int		z_RCount;	/* prevent local destruction w/inflight ipis */
#if defined(SLAB_DEBUG)
#define SLAB_DEBUG_ENTRIES	32	/* must be power of 2 */
    struct ZSources z_Sources[SLAB_DEBUG_ENTRIES];
    struct ZSources z_AltSources[SLAB_DEBUG_ENTRIES];
#endif
#if defined(INVARIANTS)
    __uint32_t	z_Bitmap[];	/* bitmap of free chunks for sanity check */
#endif
} SLZone;
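
/*
 * Illustrative sketch (compiled out): how a remote cpu might push a freed
 * chunk onto z_RChunks with a compare-and-set loop, per the NOTE above.
 * The SLChunk header is overlaid on the freed memory itself, and because
 * the zone header is in-band at the start of each zone the free path can
 * recover the SLZone by masking the chunk address down to the zone
 * boundary.  Assumes a cmpxchg-style primitive along the lines of
 * atomic_cmpset_ptr() from <machine/atomic.h>; the real free path also
 * handles the z_RSignal/z_RCount interlock and notifies z_Cpu.
 */
#if 0
static void
example_remote_free(SLZone *z, void *ptr)	/* hypothetical helper */
{
    SLChunk *chunk = (SLChunk *)ptr;	/* header lives in the freed memory */
    SLChunk *bchunk;

    for (;;) {
	bchunk = z->z_RChunks;		/* current remote-free list head */
	chunk->c_Next = bchunk;		/* link new chunk ahead of it */
	cpu_sfence();			/* publish c_Next before the CAS */
	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
	    break;			/* won the race, chunk is queued */
	/* lost the race to another remote free, retry */
    }
    /* the owning cpu would be signalled here if z_RSignal is set */
}
#endif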

#define SLZF_UNOTZEROD		0x0001	/* zone's unallocated space not zeroed */

TAILQ_HEAD(SLZoneList, SLZone);
typedef struct SLZoneList SLZoneList;

typedef struct SLGlobalData {
    SLZoneList	ZoneAry[NZONES];	/* linked list of zones NFree > 0 */
    SLZoneList	FreeZones;		/* whole zones that have become free */
    SLZoneList	FreeOvZones;		/* oversized zones */
    int		NFreeZones;		/* free zone count */
    int		JunkIndex;
    struct malloc_type ZoneInfo;	/* stats on meta-zones allocated */
} SLGlobalData;
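
/*
 * Illustrative sketch (compiled out): the shape of a per-cpu allocation fast
 * path against SLGlobalData.  ZoneAry[] is indexed by size class and only
 * holds zones with z_NFree != 0, so the first zone on the list can satisfy
 * the request, either from the cpu-local z_LChunks free list or by carving
 * never-used space via z_UIndex.  The size-class index zi is assumed to have
 * been computed by a zoneindex()-style helper; the real kmalloc() path
 * additionally handles M_ZERO, zone retirement to Free*Zones, remote chunks
 * imported from z_RChunks, and the slow path.
 */
#if 0
static void *
example_alloc_fastpath(SLGlobalData *slgd, int zi)	/* hypothetical helper */
{
    SLZone *z;
    SLChunk *chunk;

    if ((z = TAILQ_FIRST(&slgd->ZoneAry[zi])) == NULL)
	return (NULL);			/* no zone with free chunks */
    if ((chunk = z->z_LChunks) != NULL) {
	/* pop a previously-freed chunk from the cpu-local list */
	z->z_LChunks = chunk->c_Next;
	if (z->z_LChunks == NULL)
	    z->z_LChunksp = &z->z_LChunks;	/* list now empty, reset tailp */
	--z->z_NFree;
	return (chunk);
    }
    /*
     * Otherwise the real allocator carves never-used space using
     * z_UIndex/z_UEndIndex or falls back to the slow path.
     */
    return (NULL);
}
#endif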

#endif	/* _KERNEL || _KERNEL_STRUCTURES */

#endif	/* _SYS_SLABALLOC_H_ */