#ifndef JEMALLOC_INTERNAL_TYPES_H
#define JEMALLOC_INTERNAL_TYPES_H

/* Page size index type. */
typedef unsigned pszind_t;

/* Size class index type. */
typedef unsigned szind_t;

/* Processor / core id type. */
typedef int malloc_cpuid_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define MALLOCX_ARENA_BITS	12
#define MALLOCX_TCACHE_BITS	12
#define MALLOCX_LG_ALIGN_BITS	6
#define MALLOCX_ARENA_SHIFT	20
#define MALLOCX_TCACHE_SHIFT	8
#define MALLOCX_ARENA_MASK \
    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
/* NB: Arena index bias decreases the maximum number of arenas by 1. */
#define MALLOCX_ARENA_LIMIT	((1 << MALLOCX_ARENA_BITS) - 1)
#define MALLOCX_TCACHE_MASK \
    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_TCACHE_MAX	((1 << MALLOCX_TCACHE_BITS) - 3)
#define MALLOCX_LG_ALIGN_MASK	((1 << MALLOCX_LG_ALIGN_BITS) - 1)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))

#define MALLOCX_TCACHE_GET(flags)					\
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
#define MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
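/*
 * Worked example (illustration only; assumes the encoding macros from the
 * public jemalloc.h, where MALLOCX_ARENA(a) stores a+1, MALLOCX_TCACHE(tc)
 * stores tc+2, and MALLOCX_LG_ALIGN(la) stores la directly):
 *
 *   flags = MALLOCX_ARENA(3) | MALLOCX_TCACHE(5) | MALLOCX_LG_ALIGN(4)
 *         = (4 << 20) | (7 << 8) | 4
 *
 *   MALLOCX_ARENA_GET(flags)           == 4 - 1      == 3
 *   MALLOCX_TCACHE_GET(flags)          == 7 - 2      == 5
 *   MALLOCX_ALIGN_GET_SPECIFIED(flags) == ZU(1) << 4 == 16
 *
 * The biases let an all-zero field mean "unspecified".  MALLOCX_ALIGN_GET()
 * additionally masks with (SIZE_T_MAX-1) so that an unspecified lg_align of 0
 * decodes to 0 rather than to an alignment of 1.
 */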

/* Smallest size class to support. */
#define TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __m68k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __nios2__
#    define LG_QUANTUM		3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  if defined(__riscv) || defined(__riscv__)
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
	defined(__SH4_SINGLE_ONLY__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
#  endif
#endif

#define QUANTUM			((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
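/*
 * E.g., with LG_QUANTUM == 4: QUANTUM == 16, and QUANTUM_CEILING(17) ==
 * (17 + 15) & ~15 == 32.  The same mask-and-add round-up idiom underlies
 * LONG_CEILING(), PTR_CEILING(), and CACHELINE_CEILING() below; it is valid
 * only because each of these sizes is a power of two.
 */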

#define LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE		6
#define CACHELINE		64
#define CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
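/*
 * Worked example (alignment must be a power of two; ((~(alignment)) + 1) is
 * the two's complement negation, equivalent to ~(alignment - 1)):
 *
 *   ALIGNMENT_ADDR2BASE(0x1234, 0x100)   == 0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34
 *   ALIGNMENT_CEILING(0x1234, 0x100)     == 0x1300
 */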

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
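/*
 * Usage sketch (hypothetical function, for illustration only).  Under C99 and
 * later the macro expands to a true VLA (int tmp[n]); under MSVC/C89 it
 * expands to an alloca() allocation:
 *
 *	void
 *	f(const int *src, size_t n) {
 *		VARIABLE_ARRAY(int, tmp, n);
 *		memcpy(tmp, src, n * sizeof(int));
 *	}
 *
 * The alloca() fallback is reclaimed on function return rather than at end of
 * block scope, so callers must not assume block lifetime.
 */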

#endif /* JEMALLOC_INTERNAL_TYPES_H */