xref: /dragonfly/sys/sys/malloc.h (revision 0b2c5ee3)
1 /*
2  * Copyright (c) 1987, 1993, 2021
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)malloc.h	8.5 (Berkeley) 5/3/95
30  * $FreeBSD: src/sys/sys/malloc.h,v 1.48.2.2 2002/03/16 02:19:16 archie Exp $
31  */
32 
33 #ifndef _SYS_MALLOC_H_
34 #define	_SYS_MALLOC_H_
35 
36 #ifndef _SYS_TYPES_H_
37 #include <sys/types.h>
38 #endif
39 #ifndef _MACHINE_TYPES_H_
40 #include <machine/types.h>	/* vm_paddr_t and __* types */
41 #endif
42 
/*
 * Flags to malloc.  These may be or'd together; the common composite
 * forms (M_INTWAIT, M_SYSNOWAIT, etc.) are defined below.
 */
#define	M_RNOWAIT	0x0001	/* do not block */
#define	M_WAITOK	0x0002	/* wait for resources / alloc from cache */
#define	M_ZERO		0x0100	/* bzero() the allocation */
#define	M_USE_RESERVE	0x0200	/* can eat into free list reserve */
#define	M_NULLOK	0x0400	/* ok to return NULL */
#define	M_PASSIVE_ZERO	0x0800	/* (internal to the slab code only) */
#define	M_USE_INTERRUPT_RESERVE \
			0x1000	/* can exhaust free list entirely */
#define	M_POWEROF2	0x2000	/* roundup size to the nearest power of 2 */
#define	M_CACHEALIGN	0x4000	/* force CPU cache line alignment */
/* GFP_DMA32 0x10000 reserved for drm layer (not handled by kmalloc) */
57 
/*
 * M_NOWAIT has to be a set of flags for equivalence to prior use.
 *
 * M_SYSALLOC should be used for any critical infrastructure allocations
 * made by the kernel proper.
 *
 * M_INTNOWAIT should be used for any critical infrastructure allocations
 * made by interrupts.  Such allocations can still fail but will not fail
 * as often as M_NOWAIT.
 *
 * NOTE ON DRAGONFLY USE OF M_NOWAIT.  In FreeBSD M_NOWAIT allocations
 * almost always succeed.  In DragonFly, however, there is a good chance
 * that an allocation will fail.  M_NOWAIT should only be used when
 * allocations can fail without any serious detriment to the system.
 *
 * Note that allocations made from (preempted) interrupts will attempt to
 * use pages from the VM PAGE CACHE (PQ_CACHE) (i.e. those associated with
 * objects).  This is automatic.
 */

/* Non-blocking composites: M_NULLOK means the caller must handle NULL. */
#define	M_INTNOWAIT	(M_RNOWAIT | M_NULLOK | 			\
			 M_USE_RESERVE | M_USE_INTERRUPT_RESERVE)
#define	M_SYSNOWAIT	(M_RNOWAIT | M_NULLOK | M_USE_RESERVE)
/* Blocking composites: may dig into the free-list reserves. */
#define	M_INTWAIT	(M_WAITOK | M_USE_RESERVE | M_USE_INTERRUPT_RESERVE)
#define	M_SYSWAIT	(M_WAITOK | M_USE_RESERVE)

#define	M_NOWAIT	(M_RNOWAIT | M_NULLOK | M_USE_RESERVE)
#define	M_SYSALLOC	M_SYSWAIT

/*
 * Stored into each malloc_type by MALLOC_DEFINE()/MALLOC_DEFINE_OBJ()
 * below, presumably as a sanity-check stamp on the descriptor.
 */
#define	M_MAGIC		877983977	/* time when first defined :-) */
88 
89 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
90 #include <sys/_malloc.h>		/* struct malloc_type */
91 #ifndef NULL
92 #include <sys/_null.h>			/* ensure NULL is defined */
93 #endif
94 #endif
95 
96 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
/*
 * MALLOC_DEFINE() - instantiate a malloc type descriptor.
 *
 * Declares the one-element array "type", stamps it with M_MAGIC, points
 * the use pointer back at the structure's own embedded ks_use0, and
 * registers malloc_init()/malloc_uninit() via SYSINIT/SYSUNINIT at
 * SI_BOOT1_KMALLOC time.  NOTE: longdesc is accepted but unused.
 */
#define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
	struct malloc_type type[1] = {					\
	    { NULL, 0, 0, 0, 0, M_MAGIC, shortdesc, 0,			\
	      &type[0].ks_use0, { 0, 0, 0, 0 } }			\
	};								\
	SYSINIT(type##_init, SI_BOOT1_KMALLOC, SI_ORDER_ANY,		\
	    malloc_init, type);						\
	SYSUNINIT(type##_uninit, SI_BOOT1_KMALLOC, SI_ORDER_ANY,	\
	    malloc_uninit, type)
106 
/*
 * MALLOC_DEFINE_OBJ() - instantiate a fixed-object-size malloc type.
 *
 * Like MALLOC_DEFINE() but sets KSF_OBJSIZE and records the cache-line
 * aligned object size.  The declared variable is named type##_obj so the
 * kmalloc_obj*() API (which appends '_obj' itself) cannot be mixed with
 * the mixed-size API by accident.  NOTE: longdesc is accepted but unused.
 */
#define	MALLOC_DEFINE_OBJ(type, size, shortdesc, longdesc)		\
	struct malloc_type type##_obj[1] = {				\
	    { NULL, 0, 0, 0, KSF_OBJSIZE, M_MAGIC, shortdesc,		\
	      __VM_CACHELINE_ALIGN((size)),				\
	      &type##_obj[0].ks_use0, { 0, 0, 0, 0 } }			\
	};								\
	SYSINIT(type##_init, SI_BOOT1_KMALLOC, SI_ORDER_ANY,		\
	    malloc_init, type##_obj);					\
	SYSUNINIT(type##_uninit, SI_BOOT1_KMALLOC, SI_ORDER_ANY,	\
	    malloc_uninit, type##_obj)
117 
118 #else
/*
 * Non-kernel variant: same static initializer as the kernel version but
 * without the SYSINIT/SYSUNINIT registration.  NOTE: longdesc is unused.
 *
 * FIX: the initializer was missing the brace closing the struct element,
 * so any expansion of this macro was a syntax error; restored to match
 * the kernel variant above.
 */
#define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
	struct malloc_type type[1] = {					\
	    { NULL, 0, 0, 0, 0, M_MAGIC, shortdesc, 0,			\
	      &type[0].ks_use0, { 0, 0, 0, 0 } }			\
	}
124 
/*
 * Non-kernel variant of MALLOC_DEFINE_OBJ(): same initializer as the
 * kernel version but without SYSINIT/SYSUNINIT.  NOTE: longdesc is unused.
 *
 * FIX: the initializer was missing the brace closing the struct element,
 * so any expansion of this macro was a syntax error; restored to match
 * the kernel variant above.
 */
#define	MALLOC_DEFINE_OBJ(type, size, shortdesc, longdesc)		\
	struct malloc_type type##_obj[1] = {				\
	    { NULL, 0, 0, 0, KSF_OBJSIZE, M_MAGIC, shortdesc,		\
	      __VM_CACHELINE_ALIGN((size)),				\
	      &type##_obj[0].ks_use0, { 0, 0, 0, 0 } }			\
	}
131 #endif
132 
133 #ifdef _KERNEL
134 
/* Malloc types shared kernel-wide. */
MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_TEMP);

MALLOC_DECLARE(M_IP6OPT); /* for INET6 */
MALLOC_DECLARE(M_IP6NDP); /* for INET6 */
141 
142 #endif /* _KERNEL */
143 
144 #ifdef _KERNEL
145 
#define	MINALLOCSIZE	sizeof(void *)

struct globaldata;

/*
 * Low-level slab backing-store allocation.
 * XXX struct malloc_type is unused for contig*().
 */
size_t  kmem_lim_size(void);
void	*kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
void	kmem_slab_free(void *ptr, vm_size_t bytes);

void	contigfree(void *addr, unsigned long size, struct malloc_type *type)
	    __nonnull(1);
void	*contigmalloc(unsigned long size, struct malloc_type *type, int flags,
		      vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
		      unsigned long boundary) __malloclike __heedresult
		      __alloc_size(1) __alloc_align(6);
/* Per-type descriptor setup/teardown (invoked by SYSINIT/SYSUNINIT). */
void	malloc_init(void *);
void	malloc_uninit(void *);
void	malloc_kmemstats_poll(void);
void	malloc_mgt_init(struct malloc_type *type, struct kmalloc_mgt *mgt,
			size_t bytes);
void	malloc_mgt_uninit(struct malloc_type *type, struct kmalloc_mgt *mgt);
void	malloc_mgt_relocate(struct kmalloc_mgt *smgt, struct kmalloc_mgt *dmgt);
int	malloc_mgt_poll(struct malloc_type *type);
void	malloc_reinit_ncpus(void);
void	kmalloc_raise_limit(struct malloc_type *type, size_t bytes);
void	kmalloc_set_unlimited(struct malloc_type *type);
/* Runtime creation/destruction of malloc types (vs static MALLOC_DEFINE). */
void	kmalloc_create(struct malloc_type **typep, const char *descr);
void	kmalloc_destroy(struct malloc_type **typep);

/*
 * NOTE: kmalloc_obj*() functions use distinct malloc_type structures
 *	 which should not be mixed with non-obj functions.  For this reason,
 *	 all kmalloc_obj*() functions append '_obj' to the variable
 *	 name passed into them.  This guarantees that a programmer mistake
 *	 will cause the compile to fail.
 */
void	_kmalloc_create_obj(struct malloc_type **typep, const char *descr,
			size_t objsize);
#define kmalloc_create_obj(typep, descr, objsize)	\
		_kmalloc_create_obj((typep##_obj), (descr), (objsize))
#define kmalloc_destroy_obj(type) kmalloc_destroy((type##_obj))
187 
188 /*
189  * Debug and non-debug kmalloc() prototypes.
190  *
191  * The kmalloc() macro allows M_ZERO to be optimized external to
192  * the kmalloc() function.  When combined with the use a builtin
193  * for bzero() this can get rid of a considerable amount of overhead
194  * for M_ZERO based kmalloc() calls.
195  */
196 #ifdef SLAB_DEBUG
/* Debug variants: also record the allocating file and line. */
void	*_kmalloc_debug(unsigned long size, struct malloc_type *type,
			int flags, const char *file, int line)
			__malloclike __heedresult __alloc_size(1);
void	*_kmalloc_obj_debug(unsigned long size, struct malloc_type *type,
			int flags, const char *file, int line)
			__malloclike __heedresult __alloc_size(1);
void	*krealloc_debug(void *addr, unsigned long size,
			struct malloc_type *type, int flags,
			const char *file, int line) __heedresult __alloc_size(2);
char	*kstrdup_debug(const char *, struct malloc_type *,
			const char *file, int line) __malloclike __heedresult;
char	*kstrndup_debug(const char *, size_t maxlen, struct malloc_type *,
			const char *file, int line) __malloclike __heedresult;
210 
/*
 * Statement-expression form of kmalloc() for SLAB_DEBUG kernels.
 *
 * When size and flags are compile-time constants and M_ZERO is set, the
 * M_ZERO flag is stripped and the zeroing is done here with the memset
 * builtin so the compiler can optimize it away or inline it.  The memset
 * is applied unconditionally when the allocation cannot return NULL
 * (M_WAITOK without M_NULLOK); otherwise only on a non-NULL result.
 */
#define __kmalloc(size, type, flags) ({					\
	void *_malloc_item;						\
	size_t _size = (size);						\
									\
	if (__builtin_constant_p(size) &&				\
	    __builtin_constant_p(flags) &&				\
	    ((flags) & M_ZERO)) {					\
		_malloc_item = _kmalloc_debug(_size, type,		\
					    (flags) & ~M_ZERO,		\
					    __FILE__, __LINE__);	\
		if (((flags) & (M_WAITOK|M_NULLOK)) == M_WAITOK ||	\
		    __predict_true(_malloc_item != NULL)) {		\
			__builtin_memset(_malloc_item, 0, _size);	\
		}							\
	} else {							\
	    _malloc_item = _kmalloc_debug(_size, type, flags,		\
				   __FILE__, __LINE__);			\
	}								\
	_malloc_item;							\
})
231 
/*
 * Fixed-object-size counterpart of __kmalloc() for SLAB_DEBUG kernels.
 * The requested size is rounded up to a cache-line multiple before the
 * allocation, but only the caller-requested portion is zeroed.  Same
 * constant-folded M_ZERO optimization as __kmalloc() above.
 */
#define __kmalloc_obj(size, type, flags) ({				\
	void *_malloc_item;						\
	size_t _size = __VM_CACHELINE_ALIGN(size);			\
									\
	if (__builtin_constant_p(size) &&				\
	    __builtin_constant_p(flags) &&				\
	    ((flags) & M_ZERO)) {					\
		_malloc_item = _kmalloc_obj_debug(_size, type,		\
					    (flags) & ~M_ZERO,		\
					    __FILE__, __LINE__);	\
		if (((flags) & (M_WAITOK|M_NULLOK)) == M_WAITOK ||	\
		    __predict_true(_malloc_item != NULL)) {		\
			__builtin_memset(_malloc_item, 0, _size);	\
		}							\
	} else {							\
	    _malloc_item = _kmalloc_obj_debug(_size, type, flags,	\
				   __FILE__, __LINE__);			\
	}								\
	_malloc_item;							\
})
252 
/* Public entry points route through the debug-tracking wrappers. */
#define kmalloc(size, type, flags)	__kmalloc(size, type, flags)
#define kmalloc_obj(size, type, flags)	__kmalloc_obj(size, type##_obj, flags)

/*
 * These only operate on normal mixed-size zones
 */
#define krealloc(addr, size, type, flags)	\
	krealloc_debug(addr, size, type, flags, __FILE__, __LINE__)
#define kstrdup(str, type)			\
	kstrdup_debug(str, type, __FILE__, __LINE__)
#define kstrndup(str, maxlen, type)			\
	kstrndup_debug(str, maxlen, type, __FILE__, __LINE__)
265 
266 #else	/* !SLAB_DEBUG */
267 
/* Non-debug allocator entry points (kern_slaballoc.c / kern_kmalloc.c). */
void	*_kmalloc(unsigned long size, struct malloc_type *type, int flags)
		 __malloclike __heedresult __alloc_size(1);
void	*_kmalloc_obj(unsigned long size, struct malloc_type *type, int flags)
		 __malloclike __heedresult __alloc_size(1);
272 
273 static __inline __always_inline void *
274 __kmalloc(size_t _size, struct malloc_type *_type, int _flags)
275 {
276 	if (__builtin_constant_p(_size) && __builtin_constant_p(_flags) &&
277 	    (_flags & M_ZERO)) {
278 		void *_malloc_item;
279 		_malloc_item = _kmalloc(_size, _type, _flags & ~M_ZERO);
280 		if ((_flags & (M_WAITOK|M_NULLOK)) == M_WAITOK ||
281 		    __predict_true(_malloc_item != NULL)) {
282 			__builtin_memset(_malloc_item, 0, _size);
283 		}
284 		return _malloc_item;
285 	}
286 	return (_kmalloc(_size, _type, _flags));
287 }
288 
289 static __inline __always_inline void *
290 __kmalloc_obj(size_t _size, struct malloc_type *_type, int _flags)
291 {
292 	if (__builtin_constant_p(_size) && __builtin_constant_p(_flags) &&
293 	    (_flags & M_ZERO)) {
294 		void *_malloc_item;
295 		_malloc_item = _kmalloc_obj(__VM_CACHELINE_ALIGN(_size),
296 					   _type, _flags & ~M_ZERO);
297 		if ((_flags & (M_WAITOK|M_NULLOK)) == M_WAITOK ||
298 		    __predict_true(_malloc_item != NULL)) {
299 			__builtin_memset(_malloc_item, 0, _size);
300 		}
301 		return _malloc_item;
302 	}
303 	return (_kmalloc_obj(__VM_CACHELINE_ALIGN(_size), _type, _flags));
304 }
305 
/* Public entry points route through the inline wrappers above. */
#define kmalloc(size, type, flags)	\
		__kmalloc((size), type, (flags))
#define kmalloc_obj(size, type, flags)	\
		__kmalloc_obj((size), type##_obj, (flags))

/*
 * These only operate on normal mixed-size zones
 */
void	*krealloc(void *addr, unsigned long size, struct malloc_type *type,
		  int flags) __heedresult __alloc_size(2);
char	*kstrdup(const char *, struct malloc_type *)
		 __malloclike __heedresult;
char	*kstrndup(const char *, size_t maxlen, struct malloc_type *)
		  __malloclike __heedresult;

/*
 * Just macro the debug versions over to the non-debug versions, this
 * reduces the need for #ifdef's in kern_slaballoc.c and kern_kmalloc.c.
 * The file/line arguments are accepted and discarded.
 */
#define _kmalloc_debug(size, type, flags, file, line)		\
	__kmalloc((size), type, (flags))
#define _kmalloc_obj_debug(size, type, flags, file, line)	\
	__kmalloc_obj((size), type##_obj, (flags))
#define krealloc_debug(addr, size, type, flags, file, line)	\
	krealloc(addr, size, type, flags)
#define kstrdup_debug(str, type, file, line)			\
	kstrdup(str, type)
#define kstrndup_debug(str, maxlen, type, file, line)		\
	kstrndup(str, maxlen, type)
335 #endif /* SLAB_DEBUG */
336 
/* Obj variant appends '_obj' itself, matching kmalloc_obj() et al. */
#define kmalloc_obj_raise_limit(type, bytes)	\
		kmalloc_raise_limit(type##_obj, bytes)

/* NOTE: __nonnull(2) - the malloc_type argument must not be NULL. */
void	_kfree(void *addr, struct malloc_type *type) __nonnull(2);
void	_kfree_obj(void *addr, struct malloc_type *type) __nonnull(2);
long	kmalloc_limit(struct malloc_type *type);
void	slab_cleanup(void);

#define kfree(addr, type)	_kfree(addr, type)
#define kfree_obj(addr, type)	_kfree_obj(addr, type##_obj)
347 
348 #endif /* _KERNEL */
349 
350 #endif /* !_SYS_MALLOC_H_ */
351