/*
 * include/haproxy/pool.h
 * Memory management definitions.
 *
 * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _HAPROXY_POOL_H
#define _HAPROXY_POOL_H

#include <string.h>

#include <haproxy/api.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
#include <haproxy/pool-os.h>
#include <haproxy/pool-t.h>
#include <haproxy/thread.h>

/* This registers a call to create_pool_callback(ptr, name, size) */
#define REGISTER_POOL(ptr, name, size) \
        INITCALL3(STG_POOL, create_pool_callback, (ptr), (name), (size))

/* This macro declares a pool head <ptr> and registers its creation */
#define DECLARE_POOL(ptr, name, size) \
        struct pool_head *(ptr) __read_mostly = NULL; \
        REGISTER_POOL(&ptr, name, size)

/* This macro declares a static pool head <ptr> and registers its creation */
#define DECLARE_STATIC_POOL(ptr, name, size) \
        static struct pool_head *(ptr) __read_mostly; \
        REGISTER_POOL(&ptr, name, size)
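
/* Example (illustrative sketch; <pool_head_task> and <struct task> are
 * hypothetical names, not defined in this header): a file-scope pool of
 * fixed-size objects, created automatically during the STG_POOL init stage
 * by the registered callback:
 *
 *      DECLARE_STATIC_POOL(pool_head_task, "task", sizeof(struct task));
 */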

/* poison each newly allocated area with this byte if >= 0 */
extern int mem_poison_byte;

void *pool_get_from_os(struct pool_head *pool);
void pool_put_to_os(struct pool_head *pool, void *ptr);
void *pool_alloc_nocache(struct pool_head *pool);
void pool_free_nocache(struct pool_head *pool, void *ptr);
void dump_pools_to_trash();
void dump_pools(void);
int pool_total_failures();
unsigned long pool_total_allocated();
unsigned long pool_total_used();
void pool_flush(struct pool_head *pool);
void pool_gc(struct pool_head *pool_ctx);
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size);
void *pool_destroy(struct pool_head *pool);
void pool_destroy_all();
int mem_should_fail(const struct pool_head *pool);


#ifdef CONFIG_HAP_POOLS

/****************** Thread-local cache management ******************/

extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */
extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */

void pool_evict_from_local_cache(struct pool_head *pool);
void pool_evict_from_local_caches();
void pool_put_to_cache(struct pool_head *pool, void *ptr);

/* returns true if the pool is considered to have too many free objects */
static inline int pool_is_crowded(const struct pool_head *pool)
{
        return pool->allocated >= swrate_avg(pool->needed_avg + pool->needed_avg / 4, POOL_AVG_SAMPLES) &&
               (int)(pool->allocated - pool->used) >= pool->minavail;
}
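
/* Worked example with illustrative figures: if the sliding sum <needed_avg>
 * averages out to 1000 objects over POOL_AVG_SAMPLES, the first condition
 * compares <allocated> against 1000 + 1000/4 = 1250. A pool with
 * allocated=1300, used=1100 and minavail=3 is then crowded (1300 >= 1250,
 * and 1300 - 1100 = 200 >= 3), so released objects are returned to the OS
 * rather than kept in the free list.
 */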


#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)

/* this mode is essentially used with local caches and a fast malloc library,
 * which may sometimes be faster than the shared pools because it maintains
 * its own per-thread arenas.
 */
static inline void *pool_get_from_shared_cache(struct pool_head *pool)
{
        return NULL;
}

static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
        pool_free_nocache(pool, ptr);
}

#elif defined(CONFIG_HAP_LOCKLESS_POOLS)

/****************** Lockless pools implementation ******************/

/*
 * Returns a pointer to an object taken from pool <pool> if one is
 * available, otherwise returns NULL. No malloc() is attempted, and poisoning
 * is never performed. The purpose is to get the fastest possible allocation.
 */
static inline void *pool_get_from_shared_cache(struct pool_head *pool)
{
        void *ret;

        /* we'll need to reference the first element to figure the next one. We
         * must temporarily lock it so that nobody allocates then releases it,
         * or the dereference could fail.
         */
        ret = pool->free_list;
        do {
                while (unlikely(ret == POOL_BUSY)) {
                        __ha_cpu_relax();
                        ret = _HA_ATOMIC_LOAD(&pool->free_list);
                }
                if (ret == NULL)
                        return ret;
        } while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));

        if (unlikely(ret == NULL)) {
                _HA_ATOMIC_STORE(&pool->free_list, NULL);
                goto out;
        }

        /* this releases the lock */
        _HA_ATOMIC_STORE(&pool->free_list, *POOL_LINK(pool, ret));
        _HA_ATOMIC_INC(&pool->used);

#ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
        *POOL_LINK(pool, ret) = (void *)pool;
#endif

 out:
        __ha_barrier_atomic_store();
        return ret;
}
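
/* Reading aid for the protocol above: the free list head doubles as a tiny
 * spinlock. At any instant it holds one of three values:
 *
 *      NULL       the list is empty; the getter gives up immediately
 *      POOL_BUSY  another thread owns the head; spin with __ha_cpu_relax()
 *      <ptr>      valid first element; XCHG it against POOL_BUSY, then
 *                 dereference its link and publish the next element, which
 *                 releases the lock
 */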

/* Locklessly releases item <ptr> back to pool <pool>'s shared free list, or
 * to the OS if the pool is crowded, and updates the pool's usage statistics.
 * Both the pool and the pointer must be valid. Use pool_free() for normal
 * operations.
 */
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
        void **free_list;

        _HA_ATOMIC_DEC(&pool->used);

        if (unlikely(pool_is_crowded(pool))) {
                pool_put_to_os(pool, ptr);
        } else {
                free_list = _HA_ATOMIC_LOAD(&pool->free_list);
                do {
                        while (unlikely(free_list == POOL_BUSY)) {
                                __ha_cpu_relax();
                                free_list = _HA_ATOMIC_LOAD(&pool->free_list);
                        }
                        _HA_ATOMIC_STORE(POOL_LINK(pool, ptr), (void *)free_list);
                        __ha_barrier_atomic_store();
                } while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
                __ha_barrier_atomic_store();
        }
        swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}
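
/* Note on the accounting above and in pool_is_crowded(): each release feeds
 * the current number of used objects into <needed_avg>, a sliding sum over
 * roughly the last POOL_AVG_SAMPLES events, so the crowding test compares
 * <allocated> against about 1.25 times the recent average demand.
 */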

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/****************** Locked pools implementation ******************/

/*
 * Returns a pointer to an object taken from pool <pool> if one is
 * available, otherwise returns NULL. No malloc() is attempted, and poisoning
 * is never performed. The purpose is to get the fastest possible allocation.
 * This version takes the pool's lock in order to do this.
 */
static inline void *pool_get_from_shared_cache(struct pool_head *pool)
{
        void *p;

        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        if ((p = pool->free_list) != NULL)
                pool->free_list = *POOL_LINK(pool, p);
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
        if (p)
                _HA_ATOMIC_INC(&pool->used);

#ifdef DEBUG_MEMORY_POOLS
        if (p) {
                /* keep track of where the element was allocated from */
                *POOL_LINK(pool, p) = (void *)pool;
        }
#endif
        return p;
}

/* Stores the object back into the global pool's free list, unless the pool
 * is crowded, in which case the object is released to the OS. The object
 * must not be NULL. Use pool_free() for normal operations.
 */
static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
{
        _HA_ATOMIC_DEC(&pool->used);

        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
        if (!pool_is_crowded(pool)) {
                *POOL_LINK(pool, ptr) = (void *)pool->free_list;
                pool->free_list = (void *)ptr;
                ptr = NULL;
        }
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);

        if (ptr) {
                /* still not freed */
                pool_put_to_os(pool, ptr);
        }
        swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
}

#endif /* CONFIG_HAP_LOCKLESS_POOLS */

/* These are generic cache-aware wrappers that allocate/free from/to the local
 * cache first, then from the second level if it exists.
 */

/* Tries to retrieve an object from the local pool cache corresponding to pool
 * <pool>. If none is available, tries to allocate from the shared cache, and
 * returns NULL if nothing is available.
 */
static inline void *pool_get_from_cache(struct pool_head *pool)
{
        struct pool_cache_item *item;
        struct pool_cache_head *ph;

        ph = &pool->cache[tid];
        if (LIST_ISEMPTY(&ph->list))
                return pool_get_from_shared_cache(pool);

        item = LIST_NEXT(&ph->list, typeof(item), by_pool);
        ph->count--;
        pool_cache_bytes -= pool->size;
        pool_cache_count--;
        LIST_DELETE(&item->by_pool);
        LIST_DELETE(&item->by_lru);
#ifdef DEBUG_MEMORY_POOLS
        /* keep track of where the element was allocated from */
        *POOL_LINK(pool, item) = (void *)pool;
#endif
        return item;
}
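
/* Lookup order of the cache-aware wrappers: the per-thread cache is tried
 * first (no locking nor atomics at all), then the shared cache; NULL is only
 * returned once both levels are empty, at which point __pool_alloc() below
 * falls back to pool_alloc_nocache().
 */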

#else /* CONFIG_HAP_POOLS */

/* no cache pools implementation */

static inline void *pool_get_from_cache(struct pool_head *pool)
{
        return NULL;
}

static inline void pool_put_to_cache(struct pool_head *pool, void *ptr)
{
        pool_free_nocache(pool, ptr);
}

#endif /* CONFIG_HAP_POOLS */


/****************** Common high-level code ******************/

/*
 * Returns a pointer to an object taken from pool <pool> if one is available,
 * or dynamically allocated otherwise. <flags> is a binary-OR of POOL_F_*
 * flags. Prefer using pool_alloc() which does the right thing without flags.
 */
static inline void *__pool_alloc(struct pool_head *pool, unsigned int flags)
{
        void *p = NULL;

#ifdef DEBUG_FAIL_ALLOC
        if (!(flags & POOL_F_NO_FAIL) && mem_should_fail(pool))
                return NULL;
#endif

        if (!p)
                p = pool_get_from_cache(pool);
        if (!p)
                p = pool_alloc_nocache(pool);

        if (p) {
                if (flags & POOL_F_MUST_ZERO)
                        memset(p, 0, pool->size);
                else if (!(flags & POOL_F_NO_POISON) && mem_poison_byte >= 0)
                        memset(p, mem_poison_byte, pool->size);
        }
        return p;
}
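
/* Example (illustrative): allocating an object that is guaranteed zeroed and
 * that the DEBUG_FAIL_ALLOC fault injector must not artificially fail:
 *
 *      void *obj = __pool_alloc(pool, POOL_F_MUST_ZERO | POOL_F_NO_FAIL);
 */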

/*
 * Returns a pointer to an object taken from pool <pool> if one is available,
 * or dynamically allocated otherwise. Memory poisoning is performed if
 * enabled.
 */
static inline void *pool_alloc(struct pool_head *pool)
{
        return __pool_alloc(pool, 0);
}

/*
 * Returns a pointer to an object taken from pool <pool> if one is available,
 * or dynamically allocated otherwise. The area is zeroed.
 */
static inline void *pool_zalloc(struct pool_head *pool)
{
        return __pool_alloc(pool, POOL_F_MUST_ZERO);
}

/*
 * Puts a memory area back into the corresponding pool. Items are chained
 * directly through a pointer written at the beginning of the memory area, so
 * there is no need for any carrier cell. This implies that each memory area
 * is at least as big as one pointer. Just like with the libc's free(),
 * nothing is done if <ptr> is NULL.
 */
static inline void pool_free(struct pool_head *pool, void *ptr)
{
        if (likely(ptr != NULL)) {
#ifdef DEBUG_MEMORY_POOLS
                /* we'll get late corruption if we refill to the wrong pool or double-free */
                if (*POOL_LINK(pool, ptr) != (void *)pool)
                        ABORT_NOW();
#endif
                if (unlikely(mem_poison_byte >= 0))
                        memset(ptr, mem_poison_byte, pool->size);

                pool_put_to_cache(pool, ptr);
        }
}
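
/* Example (illustrative; <pool_head_conn> and <struct connection> are
 * hypothetical names): the usual allocate/use/release cycle. With
 * DEBUG_MEMORY_POOLS, releasing to a pool other than the one the object was
 * allocated from triggers the ABORT_NOW() check above.
 *
 *      struct connection *conn = pool_alloc(pool_head_conn);
 *
 *      if (!conn)
 *              return -1;
 *      ...use the object...
 *      pool_free(pool_head_conn, conn);
 */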

#endif /* _HAPROXY_POOL_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */