#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H

#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"

/*
 * Translating the names of the 'i' functions:
 *   Abbreviations used in the first part of the function name (before
 *   alloc/dalloc) describe what that function accomplishes:
 *     a: arena (query)
 *     s: size (query, or sized deallocation)
 *     e: extent (query)
 *     p: aligned (allocates)
 *     vs: size (query, without knowing that the pointer is into the heap)
 *     r: rallocx implementation
 *     x: xallocx implementation
 *   Abbreviations used in the second part of the function name (after
 *   alloc/dalloc) describe the arguments it takes:
 *     z: whether to return zeroed memory
 *     t: accepts a tcache_t * parameter
 *     m: accepts an arena_t * parameter
 */

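/*
 * iaalloc() below is the simplest instance of this scheme: 'a' is an arena
 * query, with no argument-abbreviation suffix.
 */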
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_aalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_salloc(tsdn, ptr);
}

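/*
 * Core allocation path.  Internal (is_internal) allocations are accounted
 * separately in stats and must not use a tcache or a non-auto arena
 * (asserted below).
 */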
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_internal, arena_t *arena, bool slow_path) {
	void *ret;

	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
		    WITNESS_RANK_CORE, 0);
	}

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
	    NULL, slow_path);
}

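/*
 * Aligned allocation, decoded per the scheme above: 'p' (aligned), z (zero),
 * t (tcache), m (arena).  usize must already equal sz_sa2u(usize, alignment)
 * (asserted below), so callers compute the padded size exactly once.
 */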
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_internal, arena_t *arena) {
	void *ret;

	assert(usize != 0);
	assert(usize == sz_sa2u(usize, alignment));
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena) {
	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd), false, NULL);
}

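/*
 * 'vs' variant: queries the size without assuming that ptr points into the
 * heap at all (see the naming comment at the top of this header).
 */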
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
	return arena_vsalloc(tsdn, ptr);
}

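/*
 * Core deallocation path.  On reentrant calls (nonzero reentrancy level),
 * the tcache must not be used (asserted below).
 */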
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
    bool is_internal, bool slow_path) {
	assert(ptr != NULL);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	if (config_stats && is_internal) {
		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
	}
	if (!is_internal && !tsdn_null(tsdn) &&
	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
		assert(tcache == NULL);
	}
	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}

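/*
 * Sized variant of idalloc: trusting the caller-supplied size avoids a
 * metadata size lookup on the deallocation path.
 */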
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}

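/*
 * Slow-path helper for iralloct(): the existing object cannot satisfy the
 * requested alignment, so allocate new space, copy, and free the old object.
 */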
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    hook_ralloc_args_t *hook_args) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	void *p;
	size_t usize, copysize;

	usize = sz_sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		return NULL;
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return p;
}

/*
 * is_realloc threads through the knowledge of whether or not this call comes
 * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
 * correct entry point into any hooks.
 * Note that these functions are all force-inlined, so no actual bool gets
 * passed around anywhere.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
{
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
		    zero, tcache, arena, hook_args);
	}

	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
	    tcache, hook_args);
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, hook_ralloc_args_t *hook_args) {
	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd), NULL, hook_args);
}

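/*
 * xallocx implementation: attempt to resize in place only, never moving the
 * allocation.  Returns true on failure, in which case the object is
 * unchanged; *newsize receives the resulting usable size either way.
 */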
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, size_t *newsize) {
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		*newsize = oldsize;
		return true;
	}

	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
	    newsize);
}

#endif /* JEMALLOC_INTERNAL_INLINES_C_H */