xref: /dragonfly/sys/sys/_malloc.h (revision 631c21f2)
1 /*
2  * Copyright (c) 2019-2021 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
20  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
22  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #ifndef _SYS__MALLOC_H_
31 #define	_SYS__MALLOC_H_
32 
33 /*
34  * Do not include this header outside _KERNEL or _KERNEL_STRUCTURES scopes.
35  * Used in <sys/user.h>.
36  */
37 
38 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
39 #include <sys/cdefs.h>		/* for __cache_align */
40 #include <sys/spinlock.h>	/* for spinlock */
41 #include <sys/exislock.h>	/* for exislock_t */
42 #include <machine/stdint.h>	/* for __* types */
43 #include <machine/param.h>	/* for SMP_MAXCPU */
44 
/*
 * A kmalloc slab (used with KSF_OBJSIZE) holds N fixed-size objects
 * in a fixed (typically 128KB, see KMALLOC_SLAB_SIZE) block of memory
 * prefaced by the structure.
 */
/*
 * Slab geometry.  KMALLOC_SLAB_SIZE must remain a power of 2 so that
 * KMALLOC_SLAB_MASK works as an intra-slab offset mask.  The expansion
 * is fully parenthesized so the macro behaves as a single expression
 * in any operator context.
 */
#define KMALLOC_SLAB_SIZE	((size_t)(128 * 1024))
#define KMALLOC_SLAB_MASK	((size_t)(KMALLOC_SLAB_SIZE - 1))

/* Upper bound on objects per slab (objects are at least one cache line) */
#define KMALLOC_SLAB_MAXOBJS	(KMALLOC_SLAB_SIZE / __VM_CACHELINE_SIZE)
#define KMALLOC_LOOSE_SIZE	(KMALLOC_SLAB_SIZE * 4)

#define KMALLOC_SLAB_MAGIC	0x6b736c62	/* ASCII 'kslb' */
#define KMALLOC_MAXFREEMAGS	4

/* Enable double-free detection (see kmalloc_slab.bmap consumers) */
#define KMALLOC_CHECK_DOUBLE_FREE
59 
/*
 * Header at the front of each fixed-size slab block; the object array
 * follows within the same KMALLOC_SLAB_SIZE block, starting at 'offset'.
 */
struct kmalloc_slab {
	struct spinlock		spin;
	struct kmalloc_slab	*next;		/* next mag in list */
	struct malloc_type	*type;		/* who does this belong to */
	uint32_t		magic;		/* validity check (KMALLOC_SLAB_MAGIC) */
	uint32_t		orig_cpuid;	/* originally allocated on */
	size_t			offset;		/* copied from kmalloc_mgt */
	size_t			objsize;	/* copied from malloc_type */
	size_t			ncount;		/* copied from kmalloc_mgt */
	size_t			aindex;		/* start of allocations */
	size_t			findex;		/* end of frees */
	size_t			xindex;		/* synchronizer */
	exislock_t		exis;		/* existential lock state */
	struct kmalloc_mgt	*mgt;		/* owning management structure */
	/* one bit per possible object slot (presumably for double-free
	 * checking, see KMALLOC_CHECK_DOUBLE_FREE — verify in kmalloc code) */
	uint64_t		bmap[(KMALLOC_SLAB_MAXOBJS + 63) / 64];
	void			*fobjs[1];	/* list of free objects */
} __cachealign;
77 
78 /*
79  * pcpu slab management structure for kmalloc zone.
80  *
81  * The intent is to try to improve cache characteristics and to reduce
82  * fragmentation by keeping collections localized.  The curmag list
83  * used for allocations is loosely sorted by fullness, with the most-full
84  * magazine at the head and the least-full magazine at the tail.
85  *
86  * Loosely speaking we want to allocate from the most-full magazine to best
87  * reduce fragmentation.
88  *
 * The kmalloc zone also uses one of these as a global management structure;
 * excess empty magazines are regularly moved to the global structure.
91  */
/*
 * Slab management structure.  Used both per-cpu (active/alternate) and
 * globally (partial/full/empty lists) — see the block comment above.
 */
struct kmalloc_mgt {
	struct spinlock		spin;		/* presumably guards the lists below — verify */
	struct kmalloc_slab	*active;	/* pcpu */
	struct kmalloc_slab	*alternate;	/* pcpu */
	struct kmalloc_slab	*partial;	/* global */
	struct kmalloc_slab	*full;		/* global */
	struct kmalloc_slab	*empty;		/* global */
	struct kmalloc_slab	**empty_tailp;	/* global; tail of 'empty' list */
	size_t			slab_offset;	/* first object in slab */
	size_t			slab_count;	/* objects per slab */
	size_t			npartial;	/* counts for the three lists above */
	size_t			nfull;
	size_t			nempty;
	size_t			gcache_count;	/* #slabs returned to gcache */
	size_t			unused01;	/* spare */
	size_t			unused02;	/* spare */
} __cachealign;
109 
110 /*
111  * The malloc tracking structure.  Note that per-cpu entries must be
112  * aggregated for accurate statistics, they do not actually break the
113  * stats down by cpu (e.g. the cpu freeing memory will subtract from
114  * its slot, not the originating cpu's slot).
115  *
116  * SMP_MAXCPU is used so modules which use malloc remain compatible
117  * between UP and SMP.
118  *
119  * WARNING: __cachealign typically represents 64 byte alignment, so
120  *	    this structure may be larger than expected.
121  *
 * WARNING: loosememuse is transferred to ks_loosememuse and zeroed
 *	    often (e.g. uses atomic_swap_long()).  It allows pcpu
 *	    updates to be taken into account without causing lots
 *	    of cache ping-pongs.
126  */
/*
 * Per-cpu accounting slot plus object store for a malloc zone.  Entries
 * must be aggregated across cpus for accurate statistics (see the WARNING
 * comments above — a cpu freeing memory subtracts from its own slot).
 */
struct kmalloc_use {
	__size_t	memuse;		/* pcpu memory-use accumulator */
	__size_t	inuse;		/* pcpu in-use accumulator */
	__int64_t	calls;		/* allocations counter (total) */
	__size_t	loosememuse;	/* pcpu staging, swapped into ks_loosememuse */
	struct kmalloc_mgt mgt;		/* pcpu object store */
} __cachealign;
134 
/*
 * Describes one kmalloc zone.  Referenced through MALLOC_DECLARE() as a
 * one-element array so that zone names are used as pointers.
 */
struct malloc_type {
	struct malloc_type *ks_next;	/* next in list */
	__size_t	ks_loosememuse;	/* (inaccurate) aggregate memuse */
	__size_t	ks_limit;	/* most that are allowed to exist */
	__uint64_t	ks_unused0;	/* spare */
	__uint32_t	ks_flags;	/* KSF_x flags */
	__uint32_t	ks_magic;	/* if it's not magic, don't touch it */
	const char	*ks_shortdesc;	/* short description */
	__size_t	ks_objsize;	/* single size if non-zero (KSF_OBJSIZE zones) */
	struct kmalloc_use *ks_use;	/* pcpu array of kmalloc_use slots */
	struct kmalloc_use ks_use0;	/* dummy prior to SMP startup */
	struct kmalloc_mgt ks_mgt;	/* rollup object store */
};

/* Pointer typedef retained for API compatibility */
typedef	struct malloc_type	*malloc_type_t;
150 
/*
 * Declare (in a header) a malloc zone defined elsewhere.  Declared as a
 * one-element array so the zone name is referenced as a pointer.
 */
#define	MALLOC_DECLARE(type)		\
	extern struct malloc_type type[1]	/* ref as ptr */

/* malloc_type ks_flags values */
#define KSF_OBJSIZE	0x00000001	/* zone used for one object type/size */
#define KSF_POLLING	0x00000002	/* poll in progress */

/* presumably caps slabs cached in KMGlobalData — verify against kmalloc code */
#define KMGD_MAXFREESLABS	128
158 
/*
 * Free-slab cache.  NOTE(review): naming suggests this is the global
 * structure that excess slabs are returned to, bounded by
 * KMGD_MAXFREESLABS — confirm against the kmalloc implementation.
 */
typedef struct KMGlobalData {
	struct kmalloc_slab *free_slabs;	/* list of cached free slabs */
	struct kmalloc_slab *remote_free_slabs;	/* presumably freed from other cpus */
	size_t		free_count;		/* count for free_slabs (assumed) */
	void		*reserved[5];		/* spare */
} KMGlobalData;
165 
166 #endif
167 
168 #endif /* !_SYS__MALLOC_H_ */
169