/*
 *	UCW Library -- Memory Pools
 *
 *	(c) 1997--2015 Martin Mares <mj@ucw.cz>
 *	(c) 2007 Pavel Charvat <pchar@ucw.cz>
 *	SPDX-License-Identifier: LGPL-2.1-or-later
 *	Source: https://www.ucw.cz/libucw/
 */

#ifndef _UCW_POOLS_H
#define _UCW_POOLS_H

#include "lib/defines.h"
#include <ucw/alloc.h>
#include <ucw/config.h>
#include <ucw/lib.h>
#include <string.h>

#ifdef CONFIG_UCW_CLEAN_ABI
#define mp_alloc ucw_mp_alloc
#define mp_alloc_internal ucw_mp_alloc_internal
#define mp_alloc_noalign ucw_mp_alloc_noalign
#define mp_alloc_zero ucw_mp_alloc_zero
#define mp_delete ucw_mp_delete
#define mp_flush ucw_mp_flush
#define mp_grow_internal ucw_mp_grow_internal
#define mp_init ucw_mp_init
#define mp_memdup ucw_mp_memdup
#define mp_multicat ucw_mp_multicat
#define mp_new ucw_mp_new
#define mp_open ucw_mp_open
#define mp_pop ucw_mp_pop
#define mp_printf ucw_mp_printf
#define mp_printf_append ucw_mp_printf_append
#define mp_push ucw_mp_push
#define mp_realloc ucw_mp_realloc
#define mp_realloc_zero ucw_mp_realloc_zero
#define mp_restore ucw_mp_restore
#define mp_shrink ucw_mp_shrink
#define mp_spread_internal ucw_mp_spread_internal
#define mp_start ucw_mp_start
#define mp_start_internal ucw_mp_start_internal
#define mp_start_noalign ucw_mp_start_noalign
#define mp_stats ucw_mp_stats
#define mp_str_from_mem ucw_mp_str_from_mem
#define mp_strdup ucw_mp_strdup
#define mp_strjoin ucw_mp_strjoin
#define mp_total_size ucw_mp_total_size
#define mp_vprintf ucw_mp_vprintf
#define mp_vprintf_append ucw_mp_vprintf_append
#endif

/***
 * [[defs]]
 * Definitions
 * -----------
 ***/

/**
 * Memory pool state (see @mp_push(), ...).
 * You should use this one as an opaque handle only, the insides are internal.
 **/
struct mempool_state {
  size_t free[2];
  void *last[2];
  struct mempool_state *next;
};

/**
 * Memory pool.
 * You should use this one as an opaque handle only, the insides are internal.
 **/
struct mempool {
  struct ucw_allocator allocator;	// This must be the first element
  struct mempool_state state;
  void *unused, *last_big;
  size_t chunk_size, threshold;
  uint idx;
  u64 total_size;
};

struct mempool_stats {			/** Mempool statistics. See @mp_stats(). **/
  u64 total_size;			/* Real allocated size in bytes */
  u64 used_size;			/* Estimated size allocated from mempool to application */
  uint chain_count[3];			/* Number of allocated chunks in small/big/unused chains */
  u64 chain_size[3];			/* Size of allocated chunks in small/big/unused chains */
};

/***
 * [[basic]]
 * Basic manipulation
 * ------------------
 ***/

/**
 * Initialize a given mempool structure.
 * @chunk_size must be in the interval `[1, SIZE_MAX / 2]`.
 * The pool allocates memory from the system in chunks of this size
 * and satisfies allocation requests from them.
 *
 * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
 **/
KR_EXPORT
void mp_init(struct mempool *pool, size_t chunk_size);

/**
 * Allocate and initialize a new memory pool.
 * See @mp_init() for @chunk_size limitations.
 *
 * The new mempool structure is allocated on the new mempool.
 *
 * Memory pools can be treated as <<trans:respools,resources>>, see <<trans:res_mempool()>>.
 **/
KR_EXPORT
struct mempool *mp_new(size_t chunk_size);

/**
 * Clean up a mempool initialized by @mp_init() or @mp_new().
 * Frees all the memory allocated by this mempool and,
 * if created by @mp_new(), the @pool itself.
 **/
KR_EXPORT
void mp_delete(struct mempool *pool);

/**
 * Frees all data on a memory pool, but leaves it working.
 * It can keep some of the chunks allocated to serve
 * further allocation requests. Leaves the @pool alive,
 * even if it was created with @mp_new().
 **/
KR_EXPORT
void mp_flush(struct mempool *pool);
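
/*
 * A minimal usage sketch of the basic lifecycle (the chunk size and the
 * string are illustrative values only):
 *
 *   struct mempool *mp = mp_new(4096);        // pool with 4 KB chunks
 *   char *s = mp_strdup(mp, "example");       // allocations live on the pool
 *   mp_flush(mp);                             // drop all allocations, keep the pool
 *   mp_delete(mp);                            // free everything, including the pool
 */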

/**
 * Compute some statistics for debug purposes.
 * See the definition of the <<struct_mempool_stats,mempool_stats structure>>.
 * This function scans the chunk list, so it can be slow. If you are interested
 * in total memory consumption only, mp_total_size() is faster.
 **/
void mp_stats(struct mempool *pool, struct mempool_stats *stats);

/**
 * Return how many bytes were allocated by the pool, including unused parts
 * of chunks. This function runs in constant time.
 **/
u64 mp_total_size(struct mempool *pool);

/**
 * Release unused chunks of memory reserved for further allocation
 * requests, but stop if mp_total_size() would drop below @min_total_size.
 **/
void mp_shrink(struct mempool *pool, u64 min_total_size);

/***
 * [[alloc]]
 * Allocation routines
 * -------------------
 ***/

/* For internal use only, do not call directly */
void *mp_alloc_internal(struct mempool *pool, size_t size) LIKE_MALLOC;

/**
 * The function allocates new @size bytes on a given memory pool.
 * If the @size is zero, the resulting pointer is undefined,
 * but it may be safely reallocated or used as the parameter
 * to other functions below.
 *
 * The resulting pointer is always aligned to a multiple of
 * `CPU_STRUCT_ALIGN` bytes and this condition remains true also
 * after future reallocations.
 **/
KR_EXPORT
void *mp_alloc(struct mempool *pool, size_t size);

/**
 * The same as @mp_alloc(), but the result may be unaligned.
 **/
void *mp_alloc_noalign(struct mempool *pool, size_t size);

/**
 * The same as @mp_alloc(), but fills the newly allocated memory with zeroes.
 **/
void *mp_alloc_zero(struct mempool *pool, size_t size);
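
/*
 * A short sketch of allocating objects on a pool (struct point is a
 * hypothetical example type):
 *
 *   struct point { int x, y; };
 *   struct point *pt = mp_alloc_zero(mp, sizeof(*pt));   // zero-filled, aligned
 *   char *raw = mp_alloc_noalign(mp, 3);                  // 3 bytes, possibly unaligned
 *
 * There is no way to free a single allocation; memory is released in bulk
 * by @mp_flush(), @mp_delete() or by restoring a saved state.
 */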

/**
 * Inlined version of @mp_alloc().
 **/
static inline void *mp_alloc_fast(struct mempool *pool, size_t size)
{
  size_t avail = pool->state.free[0] & ~(size_t)(CPU_STRUCT_ALIGN - 1);
  if (size <= avail)
    {
      pool->state.free[0] = avail - size;
      return (byte *)pool->state.last[0] - avail;
    }
  else
    return mp_alloc_internal(pool, size);
}

/**
 * Inlined version of @mp_alloc_noalign().
 **/
static inline void *mp_alloc_fast_noalign(struct mempool *pool, size_t size)
{
  if (size <= pool->state.free[0])
    {
      void *ptr = (byte *)pool->state.last[0] - pool->state.free[0];
      pool->state.free[0] -= size;
      return ptr;
    }
  else
    return mp_alloc_internal(pool, size);
}

/**
 * Return a generic allocator representing the given mempool.
 **/
static inline struct ucw_allocator *mp_get_allocator(struct mempool *mp)
{
  return &mp->allocator;
}
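
/*
 * A hedged sketch of using the generic allocator (this assumes that
 * struct ucw_allocator from <ucw/alloc.h> exposes an alloc() hook taking
 * the allocator and a size):
 *
 *   struct ucw_allocator *a = mp_get_allocator(mp);
 *   void *block = a->alloc(a, 128);    // ends up allocated on mp
 *
 * This lets code written against the generic allocator interface work on
 * top of a mempool without knowing the mempool API.
 */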

/***
 * [[gbuf]]
 * Growing buffers
 * ---------------
 *
 * You do not need to know in advance how large a buffer will have to be;
 * you can grow it incrementally to the needed size. You can grow only
 * one buffer at a time on a given mempool.
 *
 * Similar functionality is provided by the <<growbuf:,growing buffers>> module.
 ***/

/* For internal use only, do not call directly */
void *mp_start_internal(struct mempool *pool, size_t size) LIKE_MALLOC;
void *mp_grow_internal(struct mempool *pool, size_t size);
void *mp_spread_internal(struct mempool *pool, void *p, size_t size);

static inline uint mp_idx(struct mempool *pool, void *ptr)
{
  return ptr == pool->last_big;
}

/**
 * Open a new growing buffer (at least @size bytes long).
 * If the @size is zero, the resulting pointer is undefined,
 * but it may be safely reallocated or used as the parameter
 * to other functions below.
 *
 * The resulting pointer is always aligned to a multiple of
 * `CPU_STRUCT_ALIGN` bytes and this condition remains true also
 * after future reallocations. There is an unaligned version as well.
 *
 * Keep in mind that you can't make any other pool allocations
 * before you "close" the growing buffer with @mp_end().
 */
void *mp_start(struct mempool *pool, size_t size);
void *mp_start_noalign(struct mempool *pool, size_t size);

/**
 * Inlined version of @mp_start().
 **/
static inline void *mp_start_fast(struct mempool *pool, size_t size)
{
  size_t avail = pool->state.free[0] & ~(size_t)(CPU_STRUCT_ALIGN - 1);
  if (size <= avail)
    {
      pool->idx = 0;
      pool->state.free[0] = avail;
      return (byte *)pool->state.last[0] - avail;
    }
  else
    return mp_start_internal(pool, size);
}

/**
 * Inlined version of @mp_start_noalign().
 **/
static inline void *mp_start_fast_noalign(struct mempool *pool, size_t size)
{
  if (size <= pool->state.free[0])
    {
      pool->idx = 0;
      return (byte *)pool->state.last[0] - pool->state.free[0];
    }
  else
    return mp_start_internal(pool, size);
}

/**
 * Return start pointer of the growing buffer allocated by latest @mp_start() or a similar function.
 **/
static inline void *mp_ptr(struct mempool *pool)
{
  return (byte *)pool->state.last[pool->idx] - pool->state.free[pool->idx];
}

/**
 * Return the number of bytes available for extending the growing buffer
 * (before a reallocation is needed).
 **/
static inline size_t mp_avail(struct mempool *pool)
{
  return pool->state.free[pool->idx];
}

/**
 * Grow the buffer allocated by @mp_start() to be at least @size bytes long
 * (@size may be less than @mp_avail(), even zero). The reallocated buffer may
 * change its starting position. The content is preserved up to the minimum
 * of the old and new sizes; newly allocated memory is uninitialized.
 * Multiple calls to mp_grow() have amortized linear cost wrt. the maximum value of @size. */
static inline void *mp_grow(struct mempool *pool, size_t size)
{
  return (size <= mp_avail(pool)) ? mp_ptr(pool) : mp_grow_internal(pool, size);
}

/**
 * Grow the buffer by at least one byte -- equivalent to <<mp_grow(),`mp_grow`>>`(@pool, @mp_avail(pool) + 1)`.
 **/
static inline void *mp_expand(struct mempool *pool)
{
  return mp_grow_internal(pool, mp_avail(pool) + 1);
}

/**
 * Ensure that there are at least @size bytes free after @p;
 * if not, reallocate and adjust @p.
 **/
static inline void *mp_spread(struct mempool *pool, void *p, size_t size)
{
  return (((size_t)((byte *)pool->state.last[pool->idx] - (byte *)p) >= size) ? p : mp_spread_internal(pool, p, size));
}

/**
 * Append a character to the growing buffer. Called with @p pointing after
 * the last byte in the buffer, returns a pointer after the last byte
 * of the new (possibly reallocated) buffer.
 **/
static inline char *mp_append_char(struct mempool *pool, char *p, uint c)
{
  p = mp_spread(pool, p, 1);
  *p++ = c;
  return p;
}

/**
 * Append a memory block to the growing buffer. Called with @p pointing after
 * the last byte in the buffer, returns a pointer after the last byte
 * of the new (possibly reallocated) buffer.
 **/
static inline void *mp_append_block(struct mempool *pool, void *p, const void *block, size_t size)
{
  char *q = mp_spread(pool, p, size);
  memcpy(q, block, size);
  return q + size;
}

/**
 * Append a string to the growing buffer. Called with @p pointing after
 * the last byte in the buffer, returns a pointer after the last byte
 * of the new (possibly reallocated) buffer.
 **/
static inline void *mp_append_string(struct mempool *pool, void *p, const char *str)
{
  return mp_append_block(pool, p, str, strlen(str));
}

/**
 * Close the growing buffer. The @end must point just behind the data you want to keep
 * allocated (so it can be in the interval `[@mp_ptr(@pool), @mp_ptr(@pool) + @mp_avail(@pool)]`).
 * Returns a pointer to the beginning of the just closed block.
 **/
static inline void *mp_end(struct mempool *pool, void *end)
{
  void *p = mp_ptr(pool);
  pool->state.free[pool->idx] = (byte *)pool->state.last[pool->idx] - (byte *)end;
  return p;
}

/**
 * Close the growing buffer as a string. That is, append a zero byte and call mp_end().
 **/
static inline char *mp_end_string(struct mempool *pool, void *end)
{
  end = mp_append_char(pool, end, 0);
  return mp_end(pool, end);
}
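
/*
 * A minimal sketch of building a string with the growing-buffer interface
 * (the contents and sizes are illustrative only):
 *
 *   char *p = mp_start(mp, 1);                // open an empty buffer
 *   p = mp_append_string(mp, p, "growing ");
 *   p = mp_append_string(mp, p, "buffer");
 *   p = mp_append_char(mp, p, '!');
 *   char *s = mp_end_string(mp, p);           // s == "growing buffer!"
 */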

/**
 * Return size in bytes of the last allocated memory block (with @mp_alloc() or @mp_end()).
 **/
static inline size_t mp_size(struct mempool *pool, void *ptr)
{
  uint idx = mp_idx(pool, ptr);
  return ((byte *)pool->state.last[idx] - (byte *)ptr) - pool->state.free[idx];
}

/**
 * Open the last memory block (allocated with @mp_alloc() or @mp_end())
 * for growing and return its size in bytes. The contents and the start pointer
 * remain unchanged. Do not forget to call @mp_end() to close it.
 **/
size_t mp_open(struct mempool *pool, void *ptr);

/**
 * Inlined version of @mp_open().
 **/
static inline size_t mp_open_fast(struct mempool *pool, void *ptr)
{
  pool->idx = mp_idx(pool, ptr);
  size_t size = ((byte *)pool->state.last[pool->idx] - (byte *)ptr) - pool->state.free[pool->idx];
  pool->state.free[pool->idx] += size;
  return size;
}

/**
 * Reallocate the last memory block (allocated with @mp_alloc() or @mp_end())
 * to the new @size. Behavior is similar to @mp_grow(), but the resulting
 * block is closed.
 **/
void *mp_realloc(struct mempool *pool, void *ptr, size_t size);

/**
 * The same as @mp_realloc(), but fills the additional bytes (if any) with zeroes.
 **/
void *mp_realloc_zero(struct mempool *pool, void *ptr, size_t size);

/**
 * Inlined version of @mp_realloc().
 **/
static inline void *mp_realloc_fast(struct mempool *pool, void *ptr, size_t size)
{
  mp_open_fast(pool, ptr);
  ptr = mp_grow(pool, size);
  mp_end(pool, (byte *)ptr + size);
  return ptr;
}
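
/*
 * A short sketch of reopening and resizing the most recent allocation
 * (only the last allocated block may be reopened or reallocated):
 *
 *   char *buf = mp_alloc(mp, 16);
 *   buf = mp_realloc_zero(mp, buf, 32);   // the added 16 bytes are zero-filled
 *   size_t sz = mp_size(mp, buf);         // sz == 32
 */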

/***
 * [[store]]
 * Storing and restoring state
 * ---------------------------
 *
 * Mempools can remember the history of what was allocated and return
 * to an earlier point in time.
 ***/

/**
 * Save the current state of a memory pool.
 * Do not call this function with an opened growing buffer.
 **/
static inline void mp_save(struct mempool *pool, struct mempool_state *state)
{
  *state = pool->state;
  pool->state.next = state;
}

/**
 * Save the current state to a newly allocated mempool_state structure.
 * Do not call this function with an opened growing buffer.
 **/
struct mempool_state *mp_push(struct mempool *pool);

/**
 * Restore the state saved by @mp_save() or @mp_push() and free all
 * data allocated after that point (including the state structure itself).
 * You can't reallocate the last memory block from the saved state.
 **/
void mp_restore(struct mempool *pool, struct mempool_state *state);

/**
 * Inlined version of @mp_restore().
 **/
static inline void mp_restore_fast(struct mempool *pool, struct mempool_state *state)
{
  if (pool->state.last[0] != state->last[0] || pool->state.last[1] != state->last[1])
    mp_restore(pool, state);
  else
    {
      pool->state = *state;
      pool->last_big = &pool->last_big;
    }
}

/**
 * Restore the state saved by the last call to @mp_push().
 * @mp_pop() and @mp_push() work as a stack, so you can push multiple states safely.
 **/
void mp_pop(struct mempool *pool);
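
/*
 * A minimal sketch of the stack-like state interface (the scratch size is
 * an illustrative value):
 *
 *   mp_push(mp);                            // remember the current state
 *   void *scratch = mp_alloc(mp, 1024);     // temporary data
 *   mp_pop(mp);                             // frees scratch and the saved state
 */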


/***
 * [[string]]
 * String operations
 * -----------------
 ***/

char *mp_strdup(struct mempool *, const char *) LIKE_MALLOC;		/** Makes a copy of a string on a mempool. Returns NULL for NULL string. **/
void *mp_memdup(struct mempool *, const void *, size_t) LIKE_MALLOC;	/** Makes a copy of a memory block on a mempool. **/
/**
 * Concatenates all passed strings. The last parameter must be NULL.
 * This will concatenate two strings:
 *
 *   char *message = mp_multicat(pool, "hello ", "world", NULL);
 **/
char *mp_multicat(struct mempool *, ...) LIKE_MALLOC SENTINEL_CHECK;
/**
 * Concatenates two strings and stores result on @mp.
 */
static inline char *LIKE_MALLOC mp_strcat(struct mempool *mp, const char *x, const char *y)
{
  return mp_multicat(mp, x, y, NULL);
}
/**
 * Join strings, placing @sep between each two neighboring ones.
 * @p is the mempool to provide memory, @a is an array of strings and @n
 * tells how many of them there are.
 **/
char *mp_strjoin(struct mempool *p, char **a, uint n, uint sep) LIKE_MALLOC;
/**
 * Convert memory block to a string. Makes a copy of the given memory block
 * in the mempool @p, adding an extra terminating zero byte at the end.
 **/
char *mp_str_from_mem(struct mempool *p, const void *mem, size_t len) LIKE_MALLOC;
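
/*
 * A brief sketch of the string helpers (the values in the comments are the
 * expected results):
 *
 *   char *words[] = { "memory", "pool" };
 *   char *joined = mp_strjoin(mp, words, 2, '-');     // "memory-pool"
 *   char *head = mp_str_from_mem(mp, joined, 6);      // "memory"
 */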


/***
 * [[format]]
 * Formatted output
 * ----------------
 ***/

/**
 * printf() into an in-memory string, allocated on the memory pool.
 **/
KR_EXPORT
char *mp_printf(struct mempool *mp, const char *fmt, ...) FORMAT_CHECK(printf,2,3) LIKE_MALLOC;
/**
 * Like @mp_printf(), but uses `va_list` for parameters.
 **/
char *mp_vprintf(struct mempool *mp, const char *fmt, va_list args) LIKE_MALLOC;
/**
 * Like @mp_printf(), but it appends the data at the end of the string
 * pointed to by @ptr. The string is @mp_open()ed, so you have to
 * provide something that can be (i.e., the last block allocated on the pool).
 *
 * Returns pointer to the beginning of the string (the pointer may have
 * changed due to reallocation).
 *
 * In some versions of LibUCW, this function was called mp_append_printf(). However,
 * this name turned out to be confusing -- unlike other appending functions, this one is
 * not called on an opened growing buffer. The old name will be preserved for backward
 * compatibility for the time being.
 **/
KR_EXPORT
char *mp_printf_append(struct mempool *mp, char *ptr, const char *fmt, ...) FORMAT_CHECK(printf,3,4);
#define mp_append_printf mp_printf_append
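
/*
 * A minimal sketch of building a message with the formatted-output helpers
 * (the format strings and values are illustrative only):
 *
 *   char *msg = mp_printf(mp, "%d items", 42);
 *   msg = mp_printf_append(mp, msg, " in %s", "pool");   // "42 items in pool"
 */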
/**
 * Like @mp_printf_append(), but uses `va_list` for parameters.
 *
 * In some versions of LibUCW, this function was called mp_append_vprintf(). However,
 * this name turned out to be confusing -- unlike other appending functions, this one is
 * not called on an opened growing buffer. The old name will be preserved for backward
 * compatibility for the time being.
 **/
char *mp_vprintf_append(struct mempool *mp, char *ptr, const char *fmt, va_list args);
#define mp_append_vprintf mp_vprintf_append

#endif