1 /*
2 * Copyright (c) 1987, 1993, 2021
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)malloc.h 8.5 (Berkeley) 5/3/95
30 * $FreeBSD: src/sys/sys/malloc.h,v 1.48.2.2 2002/03/16 02:19:16 archie Exp $
31 */
32
33 #ifndef _SYS_MALLOC_H_
34 #define _SYS_MALLOC_H_
35
36 #ifndef _SYS_TYPES_H_
37 #include <sys/types.h>
38 #endif
39 #ifndef _MACHINE_TYPES_H_
40 #include <machine/types.h> /* vm_paddr_t and __* types */
41 #endif
42
43 /*
44 * flags to malloc.
45 */
46 #define M_RNOWAIT 0x0001 /* do not block */
47 #define M_WAITOK 0x0002 /* wait for resources / alloc from cache */
48 #define M_ZERO 0x0100 /* bzero() the allocation */
49 #define M_USE_RESERVE 0x0200 /* can eat into free list reserve */
50 #define M_NULLOK 0x0400 /* ok to return NULL */
51 #define M_PASSIVE_ZERO 0x0800 /* (internal to the slab code only) */
52 #define M_USE_INTERRUPT_RESERVE \
53 0x1000 /* can exhaust free list entirely */
54 #define M_POWEROF2 0x2000 /* roundup size to the nearest power of 2 */
55 #define M_CACHEALIGN 0x4000 /* force CPU cache line alignment */
56 /* GFP_DMA32 0x10000 reserved for drm layer (not handled by kmalloc) */
57
58 /*
59 * M_NOWAIT has to be a set of flags for equivalence to prior use.
60 *
61 * M_SYSALLOC should be used for any critical infrastructure allocations
62 * made by the kernel proper.
63 *
64 * M_INTNOWAIT should be used for any critical infrastructure allocations
65 * made by interrupts. Such allocations can still fail but will not fail
66 * as often as M_NOWAIT.
67 *
68 * NOTE ON DRAGONFLY USE OF M_NOWAIT. In FreeBSD M_NOWAIT allocations
69 * almost always succeed. In DragonFly, however, there is a good chance
70 * that an allocation will fail. M_NOWAIT should only be used when
71 * allocations can fail without any serious detriment to the system.
72 *
73 * Note that allocations made from (preempted) interrupts will attempt to
74 * use pages from the VM PAGE CACHE (PQ_CACHE) (i.e. those associated with
75 * objects). This is automatic.
76 */
77
/*
 * Composite flag sets.  See the NOTE above for when each should be used.
 */
#define	M_INTNOWAIT	(M_RNOWAIT | M_NULLOK | \
			 M_USE_RESERVE | M_USE_INTERRUPT_RESERVE)
#define	M_SYSNOWAIT	(M_RNOWAIT | M_NULLOK | M_USE_RESERVE)
#define	M_INTWAIT	(M_WAITOK | M_USE_RESERVE | M_USE_INTERRUPT_RESERVE)
#define	M_SYSWAIT	(M_WAITOK | M_USE_RESERVE)

#define	M_NOWAIT	(M_RNOWAIT | M_NULLOK | M_USE_RESERVE)
#define	M_SYSALLOC	M_SYSWAIT

#define	M_MAGIC		877983977	/* time when first defined :-) */
88
89 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
90 #include <sys/_malloc.h> /* struct malloc_type */
91 #ifndef NULL
92 #include <sys/_null.h> /* ensure NULL is defined */
93 #endif
94 #endif
95
96 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
/*
 * Define and statically initialize a malloc type, and register it for
 * boot-time initialization/teardown via SYSINIT/SYSUNINIT.
 *
 * MALLOC_DEFINE_OBJ creates a fixed-object-size type: it records the
 * cache-line-aligned object size, sets KSF_OBJSIZE, and postpends '_obj'
 * to the type name so object zones cannot be mixed with mixed-size
 * zones by accident (see the kmalloc_obj*() NOTE below).
 */
#define MALLOC_DEFINE(type, shortdesc, longdesc)		\
	struct malloc_type type[1] = {				\
	    { NULL, 0, 0, 0, 0, M_MAGIC, shortdesc, 0,		\
	      &type[0].ks_use0, { 0, 0, 0, 0 } }		\
	};							\
	SYSINIT(type##_init, SI_BOOT1_KMALLOC, SI_ORDER_ANY,	\
	    malloc_init, type);					\
	SYSUNINIT(type##_uninit, SI_BOOT1_KMALLOC, SI_ORDER_ANY, \
	    malloc_uninit, type)

#define MALLOC_DEFINE_OBJ(type, size, shortdesc, longdesc)	\
	struct malloc_type type##_obj[1] = {			\
	    { NULL, 0, 0, 0, KSF_OBJSIZE, M_MAGIC, shortdesc,	\
	      __VM_CACHELINE_ALIGN((size)),			\
	      &type##_obj[0].ks_use0, { 0, 0, 0, 0 } }		\
	};							\
	SYSINIT(type##_init, SI_BOOT1_KMALLOC, SI_ORDER_ANY,	\
	    malloc_init, type##_obj);				\
	SYSUNINIT(type##_uninit, SI_BOOT1_KMALLOC, SI_ORDER_ANY, \
	    malloc_uninit, type##_obj)
117
118 #else
/*
 * Non-kernel variant: define and initialize the type without the
 * SYSINIT boot-time registration.
 *
 * BUGFIX: the original was missing the closing brace of the inner
 * struct initializer, leaving the array initializer unterminated and
 * making any expansion of the macro fail to compile (compare the
 * _KERNEL variant above).
 */
#define MALLOC_DEFINE(type, shortdesc, longdesc)		\
	struct malloc_type type[1] = {				\
	    { NULL, 0, 0, 0, 0, M_MAGIC, shortdesc, 0,		\
	      &type[0].ks_use0, { 0, 0, 0, 0 } }		\
	}
124
/*
 * Non-kernel variant of MALLOC_DEFINE_OBJ (no SYSINIT registration).
 *
 * BUGFIX: as with the non-kernel MALLOC_DEFINE, the inner struct
 * initializer was missing its closing brace, so the macro could not
 * compile when expanded (compare the _KERNEL variant above).
 */
#define MALLOC_DEFINE_OBJ(type, size, shortdesc, longdesc)	\
	struct malloc_type type##_obj[1] = {			\
	    { NULL, 0, 0, 0, KSF_OBJSIZE, M_MAGIC, shortdesc,	\
	      __VM_CACHELINE_ALIGN((size)),			\
	      &type##_obj[0].ks_use0, { 0, 0, 0, 0 } }		\
	}
131 #endif
132
#ifdef _KERNEL

/* Well-known malloc types used throughout the kernel. */
MALLOC_DECLARE(M_CACHE);
MALLOC_DECLARE(M_DEVBUF);
MALLOC_DECLARE(M_TEMP);
MALLOC_DECLARE(M_FPUCTX);

MALLOC_DECLARE(M_IP6OPT);	/* for INET6 */
MALLOC_DECLARE(M_IP6NDP);	/* for INET6 */

#endif	/* _KERNEL */
144
145 #ifdef _KERNEL
146
#define	MINALLOCSIZE	sizeof(void *)

struct globaldata;

/* XXX struct malloc_type is unused for contig*(). */
size_t	kmem_lim_size(void);
/* Low-level slab backing store (no malloc_type accounting) — see kmem_slab_free(). */
void	*kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
void	kmem_slab_free(void *ptr, vm_size_t bytes);

void	contigfree(void *addr, unsigned long size, struct malloc_type *type)
	    __nonnull(1);
void	*contigmalloc(unsigned long size, struct malloc_type *type, int flags,
	    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
	    unsigned long boundary) __malloclike __heedresult
	    __alloc_size(1) __alloc_align(6);
/* Boot-time hooks invoked by the MALLOC_DEFINE* SYSINIT/SYSUNINIT entries. */
void	malloc_init(void *);
void	malloc_uninit(void *);
void	malloc_kmemstats_poll(void);
/* Per-type zone management (struct kmalloc_mgt declared in <sys/_malloc.h>). */
void	malloc_mgt_init(struct malloc_type *type, struct kmalloc_mgt *mgt,
	    size_t bytes);
void	malloc_mgt_uninit(struct malloc_type *type, struct kmalloc_mgt *mgt);
void	malloc_mgt_relocate(struct kmalloc_mgt *smgt, struct kmalloc_mgt *dmgt);
int	malloc_mgt_poll(struct malloc_type *type);
void	malloc_reinit_ncpus(void);
void	kmalloc_raise_limit(struct malloc_type *type, size_t bytes);
void	kmalloc_set_unlimited(struct malloc_type *type);
/* Dynamic (runtime) creation/destruction of malloc types. */
void	kmalloc_create(struct malloc_type **typep, const char *descr);
void	kmalloc_destroy(struct malloc_type **typep);
175
176 /*
177 * NOTE: kmalloc_obj*() functiions use distinct malloc_type structures
178 * which should not be mixed with non-obj functions. For this reason,
179 * all kmalloc_obj*() functions postpend the '_obj' to the variable
180 * name passed into them. This guarantees that a programmer mistake
181 * will cause the compile to fail.
182 */
183 void _kmalloc_create_obj(struct malloc_type **typep, const char *descr,
184 size_t objsize);
185 #define kmalloc_create_obj(typep, descr, objsize) \
186 _kmalloc_create_obj((typep##_obj), (descr), (objsize))
187 #define kmalloc_destroy_obj(type) kmalloc_destroy((type##_obj))
188
189 /*
190 * Debug and non-debug kmalloc() prototypes.
191 *
192 * The kmalloc() macro allows M_ZERO to be optimized external to
193 * the kmalloc() function. When combined with the use a builtin
194 * for bzero() this can get rid of a considerable amount of overhead
195 * for M_ZERO based kmalloc() calls.
196 */
#ifdef SLAB_DEBUG
/*
 * Debug variants: the caller's __FILE__ and __LINE__ are passed through
 * to the allocator for diagnostics.
 */
void	*_kmalloc_debug(unsigned long size, struct malloc_type *type,
			int flags, const char *file, int line)
			__malloclike __heedresult __alloc_size(1);
void	*_kmalloc_obj_debug(unsigned long size, struct malloc_type *type,
			int flags, const char *file, int line)
			__malloclike __heedresult __alloc_size(1);
void	*krealloc_debug(void *addr, unsigned long size,
			struct malloc_type *type, int flags,
			const char *file, int line) __heedresult __alloc_size(2);
char	*kstrdup_debug(const char *, struct malloc_type *,
			const char *file, int line) __malloclike __heedresult;
char	*kstrndup_debug(const char *, size_t maxlen, struct malloc_type *,
			const char *file, int line) __malloclike __heedresult;

/*
 * Statement-expression form of kmalloc().  When size and flags are
 * compile-time constants and M_ZERO is requested, M_ZERO is stripped
 * from the flags and the zeroing is done with __builtin_memset() so
 * the compiler can optimize (or fold away) the zeroing.  The NULL
 * check is skipped when M_WAITOK is specified without M_NULLOK; such
 * allocations are assumed not to return NULL.
 */
#define __kmalloc(size, type, flags) ({				\
	void *_malloc_item;					\
	size_t _size = (size);					\
								\
	if (__builtin_constant_p(size) &&			\
	    __builtin_constant_p(flags) &&			\
	    ((flags) & M_ZERO)) {				\
		_malloc_item = _kmalloc_debug(_size, type,	\
					  (flags) & ~M_ZERO,	\
					  __FILE__, __LINE__);	\
		if (((flags) & (M_WAITOK|M_NULLOK)) == M_WAITOK || \
		    __predict_true(_malloc_item != NULL)) {	\
			__builtin_memset(_malloc_item, 0, _size); \
		}						\
	} else {						\
		_malloc_item = _kmalloc_debug(_size, type, flags, \
					  __FILE__, __LINE__);	\
	}							\
	_malloc_item;						\
})

/*
 * Object-zone variant: the size is rounded up to a cache line multiple
 * before allocation.  Note the memset uses the rounded-up _size here.
 */
#define __kmalloc_obj(size, type, flags) ({			\
	void *_malloc_item;					\
	size_t _size = __VM_CACHELINE_ALIGN(size);		\
								\
	if (__builtin_constant_p(size) &&			\
	    __builtin_constant_p(flags) &&			\
	    ((flags) & M_ZERO)) {				\
		_malloc_item = _kmalloc_obj_debug(_size, type,	\
					  (flags) & ~M_ZERO,	\
					  __FILE__, __LINE__);	\
		if (((flags) & (M_WAITOK|M_NULLOK)) == M_WAITOK || \
		    __predict_true(_malloc_item != NULL)) {	\
			__builtin_memset(_malloc_item, 0, _size); \
		}						\
	} else {						\
		_malloc_item = _kmalloc_obj_debug(_size, type, flags, \
					  __FILE__, __LINE__);	\
	}							\
	_malloc_item;						\
})

/* Public entry points; kmalloc_obj() token-pastes '_obj' onto the type. */
#define kmalloc(size, type, flags)	__kmalloc(size, type, flags)
#define kmalloc_obj(size, type, flags)	__kmalloc_obj(size, type##_obj, flags)

/*
 * These only operate on normal mixed-size zones
 */
#define krealloc(addr, size, type, flags)	\
	krealloc_debug(addr, size, type, flags, __FILE__, __LINE__)
#define kstrdup(str, type)			\
	kstrdup_debug(str, type, __FILE__, __LINE__)
#define kstrndup(str, maxlen, type)		\
	kstrndup_debug(str, maxlen, type, __FILE__, __LINE__)
266
#else /* !SLAB_DEBUG */

/* Non-debug allocator entry points (no caller file/line tracking). */
void	*_kmalloc(unsigned long size, struct malloc_type *type, int flags)
	    __malloclike __heedresult __alloc_size(1);
void	*_kmalloc_obj(unsigned long size, struct malloc_type *type, int flags)
	    __malloclike __heedresult __alloc_size(1);
273
274 static __inline __always_inline void *
__kmalloc(size_t _size,struct malloc_type * _type,int _flags)275 __kmalloc(size_t _size, struct malloc_type *_type, int _flags)
276 {
277 if (__builtin_constant_p(_size) && __builtin_constant_p(_flags) &&
278 (_flags & M_ZERO)) {
279 void *_malloc_item;
280 _malloc_item = _kmalloc(_size, _type, _flags & ~M_ZERO);
281 if ((_flags & (M_WAITOK|M_NULLOK)) == M_WAITOK ||
282 __predict_true(_malloc_item != NULL)) {
283 __builtin_memset(_malloc_item, 0, _size);
284 }
285 return _malloc_item;
286 }
287 return (_kmalloc(_size, _type, _flags));
288 }
289
290 static __inline __always_inline void *
__kmalloc_obj(size_t _size,struct malloc_type * _type,int _flags)291 __kmalloc_obj(size_t _size, struct malloc_type *_type, int _flags)
292 {
293 if (__builtin_constant_p(_size) && __builtin_constant_p(_flags) &&
294 (_flags & M_ZERO)) {
295 void *_malloc_item;
296 _malloc_item = _kmalloc_obj(__VM_CACHELINE_ALIGN(_size),
297 _type, _flags & ~M_ZERO);
298 if ((_flags & (M_WAITOK|M_NULLOK)) == M_WAITOK ||
299 __predict_true(_malloc_item != NULL)) {
300 __builtin_memset(_malloc_item, 0, _size);
301 }
302 return _malloc_item;
303 }
304 return (_kmalloc_obj(__VM_CACHELINE_ALIGN(_size), _type, _flags));
305 }
306
/* Public entry points; kmalloc_obj() token-pastes '_obj' onto the type. */
#define kmalloc(size, type, flags)		\
	__kmalloc((size), type, (flags))
#define kmalloc_obj(size, type, flags)		\
	__kmalloc_obj((size), type##_obj, (flags))

/*
 * These only operate on normal mixed-size zones
 */
void	*krealloc(void *addr, unsigned long size, struct malloc_type *type,
		  int flags) __heedresult __alloc_size(2);
char	*kstrdup(const char *, struct malloc_type *)
		__malloclike __heedresult;
char	*kstrndup(const char *, size_t maxlen, struct malloc_type *)
		__malloclike __heedresult;

/*
 * Just macro the debug versions over to the non-debug versions, this
 * reduces the need for #ifdef's in kern_slaballoc.c and kern_kmalloc.c.
 * (file/line arguments are accepted but ignored here.)
 */
#define _kmalloc_debug(size, type, flags, file, line)		\
	__kmalloc((size), type, (flags))
#define _kmalloc_obj_debug(size, type, flags, file, line)	\
	__kmalloc_obj((size), type##_obj, (flags))
#define krealloc_debug(addr, size, type, flags, file, line)	\
	krealloc(addr, size, type, flags)
#define kstrdup_debug(str, type, file, line)			\
	kstrdup(str, type)
#define kstrndup_debug(str, maxlen, type, file, line)		\
	kstrndup(str, maxlen, type)
336 #endif /* SLAB_DEBUG */
337
/* Object-zone limit wrappers: token-paste '_obj' onto the type name. */
#define kmalloc_obj_raise_limit(type, bytes)	\
	kmalloc_raise_limit(type##_obj, bytes)
#define kmalloc_obj_set_unlimited(type)		\
	kmalloc_set_unlimited(type##_obj)

/* Free routines; the type must never be NULL (__nonnull(2)). */
void	_kfree(void *addr, struct malloc_type *type) __nonnull(2);
void	_kfree_obj(void *addr, struct malloc_type *type) __nonnull(2);
size_t	kmalloc_usable_size(const void *ptr);
long	kmalloc_limit(struct malloc_type *type);
void	slab_cleanup(void);

#define kfree(addr, type)	_kfree(addr, type)
#define kfree_obj(addr, type)	_kfree_obj(addr, type##_obj)
351
352 #endif /* _KERNEL */
353
354 #endif /* !_SYS_MALLOC_H_ */
355