/*
 *	UCW Library -- Memory Pools (One-Time Allocation)
 *
 *	(c) 1997--2001 Martin Mares <mj@ucw.cz>
 *	(c) 2007 Pavel Charvat <pchar@ucw.cz>
 *	(c) 2015, 2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
 *
 *	This software may be freely distributed and used according to the terms
 *	of the GNU Lesser General Public License.
 */
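
/*
 * A minimal usage sketch of the one-time allocation API defined below:
 *
 *	struct mempool *mp = mp_new(4096);   // pool with ~4 KiB chunks
 *	char *data = mp_alloc(mp, 128);      // valid until flush/delete
 *	mp_flush(mp);                        // drop all allocations at once
 *	mp_delete(mp);                       // destroy the pool itself
 */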

#undef LOCAL_DEBUG

#include <string.h>
#include <strings.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include "contrib/asan.h"
#include "contrib/macros.h"
#include "contrib/ucw/mempool.h"

/** \todo This shouldn't be precalculated, but computed on load. */
#define CPU_PAGE_SIZE 4096

/** Round an integer \p s up to the nearest multiple of \p a (which must be a power of two) **/
#define ALIGN_TO(s, a) (((s)+a-1)&~(a-1))
#define MP_CHUNK_TAIL ALIGN_TO(sizeof(struct mempool_chunk), CPU_STRUCT_ALIGN)
#define MP_SIZE_MAX (~0U - MP_CHUNK_TAIL - CPU_PAGE_SIZE)
#define DBG(s, ...)

/** \note Imported MMAP backend from bigalloc.c */
#define CONFIG_UCW_POOL_IS_MMAP
#ifdef CONFIG_UCW_POOL_IS_MMAP
#include <sys/mman.h>
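
/** Allocate \p len bytes of page-aligned memory directly from the kernel.
 * Returns NULL when \p len is zero, exceeds SIZE_MAX, or mmap() fails. */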
static void *
page_alloc(uint64_t len)
{
	if (!len) {
		return NULL;
	}
	if (len > SIZE_MAX) {
		return NULL;
	}
	assert(!(len & (CPU_PAGE_SIZE-1)));
	uint8_t *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == (uint8_t*) MAP_FAILED) {
		return NULL;
	}
	return p;
}

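/** Return a block obtained from page_alloc() back to the kernel.
 * Both \p start and \p len must be page-aligned. */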
static void
page_free(void *start, uint64_t len)
{
	assert(!(len & (CPU_PAGE_SIZE-1)));
	assert(!((uintptr_t) start & (CPU_PAGE_SIZE-1)));
	munmap(start, len);
}
#endif

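/** Chunk header, stored at the *end* of each chunk; the usable memory lies
 * directly below it, so data pointers are computed backwards from the
 * header. \p size counts the usable memory only, excluding the header. */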
struct mempool_chunk {
	struct mempool_chunk *next;
	unsigned size;
};

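/** Round a chunk size up so that the chunk plus its trailing header fill
 * whole pages (mmap backend), or align it to CPU_STRUCT_ALIGN (malloc backend). */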
static unsigned
mp_align_size(unsigned size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
	return ALIGN_TO(size + MP_CHUNK_TAIL, CPU_PAGE_SIZE) - MP_CHUNK_TAIL;
#else
	return ALIGN_TO(size, CPU_STRUCT_ALIGN);
#endif
}

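/** Initialize a pool in caller-provided memory. Requests of at most
 * \p threshold bytes (half the chunk size) are served from regular chunks;
 * larger ones get a dedicated "big" chunk. */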
void
mp_init(struct mempool *pool, unsigned chunk_size)
{
	chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
	*pool = (struct mempool) {
		.chunk_size = chunk_size,
		.threshold = chunk_size >> 1,
		.last_big = &pool->last_big
	};
}

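/** Allocate a standalone chunk for one oversized allocation: \p size bytes
 * of payload followed by the chunk header. The payload stays poisoned for
 * ASAN until it is handed out. */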
static void *
mp_new_big_chunk(unsigned size)
{
	uint8_t *data = malloc(size + MP_CHUNK_TAIL);
	if (!data) {
		return NULL;
	}
	ASAN_POISON_MEMORY_REGION(data, size);
	struct mempool_chunk *chunk = (struct mempool_chunk *)(data + size);
	chunk->size = size;
	return chunk;
}

static void
mp_free_big_chunk(struct mempool_chunk *chunk)
{
	void *ptr = (uint8_t *)chunk - chunk->size;
	ASAN_UNPOISON_MEMORY_REGION(ptr, chunk->size);
	free(ptr);
}

static void *
mp_new_chunk(unsigned size)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
	uint8_t *data = page_alloc(size + MP_CHUNK_TAIL);
	if (!data) {
		return NULL;
	}
	ASAN_POISON_MEMORY_REGION(data, size);
	struct mempool_chunk *chunk = (struct mempool_chunk *)(data + size);
	chunk->size = size;
	return chunk;
#else
	return mp_new_big_chunk(size);
#endif
}

static void
mp_free_chunk(struct mempool_chunk *chunk)
{
#ifdef CONFIG_UCW_POOL_IS_MMAP
	uint8_t *data = (uint8_t *)chunk - chunk->size;
	ASAN_UNPOISON_MEMORY_REGION(data, chunk->size);
	page_free(data, chunk->size + MP_CHUNK_TAIL);
#else
	mp_free_big_chunk(chunk);
#endif
}

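/** Create a self-contained pool: the struct mempool itself lives at the
 * start of its first chunk, so mp_delete() releases it together with the
 * chunk chain. */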
struct mempool *
mp_new(unsigned chunk_size)
{
	chunk_size = mp_align_size(MAX(sizeof(struct mempool), chunk_size));
	struct mempool_chunk *chunk = mp_new_chunk(chunk_size);
	if (!chunk) {
		return NULL;
	}
	struct mempool *pool = (struct mempool *)((uint8_t *)chunk - chunk_size);
	ASAN_UNPOISON_MEMORY_REGION(pool, sizeof(*pool));
	DBG("Creating mempool %p with %u bytes long chunks", pool, chunk_size);
	chunk->next = NULL;
	ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
	*pool = (struct mempool) {
		.state = { .free = { chunk_size - sizeof(*pool) }, .last = { chunk } },
		.chunk_size = chunk_size,
		.threshold = chunk_size >> 1,
		.last_big = &pool->last_big
	};
	return pool;
}

static void
mp_free_chain(struct mempool_chunk *chunk)
{
	while (chunk) {
		ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		struct mempool_chunk *next = chunk->next;
		mp_free_chunk(chunk);
		chunk = next;
	}
}

static void
mp_free_big_chain(struct mempool_chunk *chunk)
{
	while (chunk) {
		ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		struct mempool_chunk *next = chunk->next;
		mp_free_big_chunk(chunk);
		chunk = next;
	}
}

void
mp_delete(struct mempool *pool)
{
	if (pool == NULL) {
		return;
	}
	DBG("Deleting mempool %p", pool);
	mp_free_big_chain(pool->state.last[1]);
	mp_free_chain(pool->unused);
	mp_free_chain(pool->state.last[0]); // can contain the mempool structure
}

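/** Release everything allocated from the pool without destroying it:
 * big chunks are freed, regular chunks move to the \p unused list for
 * reuse, and the chunk embedding the pool itself stays as the new head. */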
void
mp_flush(struct mempool *pool)
{
	mp_free_big_chain(pool->state.last[1]);
	struct mempool_chunk *chunk = pool->state.last[0], *next;
	while (chunk) {
		ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		if ((uint8_t *)chunk - chunk->size == (uint8_t *)pool) {
			break;
		}
		next = chunk->next;
		chunk->next = pool->unused;
		ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		pool->unused = chunk;
		chunk = next;
	}
	pool->state.last[0] = chunk;
	if (chunk) {
		pool->state.free[0] = chunk->size - sizeof(*pool);
		ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
	} else {
		pool->state.free[0] = 0;
	}
	pool->state.last[1] = NULL;
	pool->state.free[1] = 0;
	pool->last_big = &pool->last_big;
}

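/** Accumulate the size and chunk count of one chain into \p stats slot
 * \p idx (0 = regular chunks, 1 = big chunks, 2 = unused chunks). */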
static void
mp_stats_chain(struct mempool_chunk *chunk, struct mempool_stats *stats, unsigned idx)
{
	struct mempool_chunk *next;
	while (chunk) {
		ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		stats->chain_size[idx] += chunk->size + sizeof(*chunk);
		stats->chain_count[idx]++;
		next = chunk->next;
		ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		chunk = next;
	}
	stats->total_size += stats->chain_size[idx];
}

void
mp_stats(struct mempool *pool, struct mempool_stats *stats)
{
	bzero(stats, sizeof(*stats));
	mp_stats_chain(pool->state.last[0], stats, 0);
	mp_stats_chain(pool->state.last[1], stats, 1);
	mp_stats_chain(pool->unused, stats, 2);
}

uint64_t
mp_total_size(struct mempool *pool)
{
	struct mempool_stats stats;
	mp_stats(pool, &stats);
	return stats.total_size;
}

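/** Slow path of the allocators: open a fresh regular chunk (reusing one
 * from the \p unused list when possible) for small requests, or allocate
 * a dedicated big chunk for requests above the threshold. */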
static void *
mp_alloc_internal(struct mempool *pool, unsigned size)
{
	struct mempool_chunk *chunk;
	if (size <= pool->threshold) {
		pool->idx = 0;
		if (pool->unused) {
			chunk = pool->unused;
			ASAN_UNPOISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
			pool->unused = chunk->next;
		} else {
			chunk = mp_new_chunk(pool->chunk_size);
			if (!chunk) {
				return NULL;
			}
		}
		chunk->next = pool->state.last[0];
		ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		pool->state.last[0] = chunk;
		pool->state.free[0] = pool->chunk_size - size;
		return (uint8_t *)chunk - pool->chunk_size;
	} else if (size <= MP_SIZE_MAX) {
		pool->idx = 1;
		unsigned aligned = ALIGN_TO(size, CPU_STRUCT_ALIGN);
		chunk = mp_new_big_chunk(aligned);
		if (!chunk) {
			return NULL;
		}
		chunk->next = pool->state.last[1];
		ASAN_POISON_MEMORY_REGION(chunk, sizeof(struct mempool_chunk));
		pool->state.last[1] = chunk;
		pool->state.free[1] = aligned - size;
		return pool->last_big = (uint8_t *)chunk - aligned;
	} else {
		fprintf(stderr, "Cannot allocate %u bytes from a mempool\n", size);
		assert(0);
		return NULL;
	}
}

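/** Allocate \p size bytes aligned to CPU_STRUCT_ALIGN. The fast path only
 * bumps the free counter of the current chunk; mp_alloc_internal() is
 * called when the chunk runs out of space. */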
void *
mp_alloc(struct mempool *pool, unsigned size)
{
	unsigned avail = pool->state.free[0] & ~(CPU_STRUCT_ALIGN - 1);
	void *ptr = NULL;
	if (size <= avail) {
		pool->state.free[0] = avail - size;
		ptr = (uint8_t*)pool->state.last[0] - avail;
	} else {
		ptr = mp_alloc_internal(pool, size);
	}
	if (ptr) {
		ASAN_UNPOISON_MEMORY_REGION(ptr, size);
	}
	return ptr;
}

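/** Like mp_alloc(), but packs allocations tightly with no alignment
 * padding between them. */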
void *
mp_alloc_noalign(struct mempool *pool, unsigned size)
{
	void *ptr = NULL;
	if (size <= pool->state.free[0]) {
		ptr = (uint8_t*)pool->state.last[0] - pool->state.free[0];
		pool->state.free[0] -= size;
	} else {
		ptr = mp_alloc_internal(pool, size);
	}
	if (ptr) {
		ASAN_UNPOISON_MEMORY_REGION(ptr, size);
	}
	return ptr;
}

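/** Like mp_alloc(), but the returned memory is zeroed. */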
void *
mp_alloc_zero(struct mempool *pool, unsigned size)
{
	void *ptr = mp_alloc(pool, size);
	if (ptr) {
		bzero(ptr, size);
	}
	return ptr;
}