/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <haproxy/activity-t.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/stats-t.h>
#include <haproxy/stream_interface.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>


#ifdef CONFIG_HAP_LOCAL_POOLS
/* These are the most common pools, expected to be initialized first. They
 * are allocated from an array, which makes it possible to map them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

/* These ones are initialized per-thread on startup by init_pools() */
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
THREAD_LOCAL size_t pool_cache_bytes = 0;                /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0;                /* #cache objects   */
#endif

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
static int mem_should_fail(const struct pool_head *);
#endif

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported :
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;
	int idx __maybe_unused;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 *
	 * Note: for the LRU cache, we need to store 2 doubly-linked lists.
	 */

	if (!(flags & MEM_F_EXACT)) {
		align = 4 * sizeof(void *); // 2 lists = 4 pointers min
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}

	/* TODO: thread: we do not lock the pool list for now because all pools
	 * are created during HAProxy startup (i.e. before thread creation). */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a shareable one or for the next position
			 * before which we will insert a new one.
			 */
			if ((flags & entry->flags & MEM_F_SHARED)
#ifdef DEBUG_DONT_SHARE_POOLS
			    && strcmp(name, entry->name) == 0
#endif
			    ) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
#ifdef CONFIG_HAP_LOCAL_POOLS
		if (pool_base_count < MAX_BASE_POOLS)
			pool = &pool_base_start[pool_base_count++];

		if (!pool) {
			/* look for a freed entry */
			for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
				if (!entry->size) {
					pool = entry;
					break;
				}
			}
		}
#endif

		if (!pool)
			pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);

#ifdef CONFIG_HAP_LOCAL_POOLS
		/* update per-thread pool cache if necessary */
		idx = pool_get_index(pool);
		if (idx >= 0) {
			int thr;

			for (thr = 0; thr < MAX_THREADS; thr++)
				pool_cache[thr][idx].size = size;
		}
#endif
		HA_SPIN_INIT(&pool->lock);
	}
	pool->users++;
	return pool;
}
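
/* Usage sketch (illustrative only: the structure, variable and pool names
 * below are hypothetical; pool_alloc()/pool_free() are the allocation
 * helpers declared in haproxy/pool.h):
 *
 *	static struct pool_head *pool_head_example;
 *
 *	pool_head_example = create_pool("example", sizeof(struct example), MEM_F_SHARED);
 *	if (pool_head_example) {
 *		struct example *e = pool_alloc(pool_head_example);
 *		...
 *		pool_free(pool_head_example, e);
 *	}
 */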

#ifdef CONFIG_HAP_LOCAL_POOLS
/* Evicts some of the oldest objects from the local cache, pushing them to the
 * global pool.
 */
void pool_evict_from_cache()
{
	struct pool_cache_item *item;
	struct pool_cache_head *ph;

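	/* Release the oldest entries (LRU order) until the thread-local cache
	 * drops below 7/8 of CONFIG_HAP_POOL_CACHE_SIZE.
	 */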
	do {
		item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		LIST_DEL(&item->by_pool);
		LIST_DEL(&item->by_lru);
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= ph->size;
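		/* (ph - pool_cache[tid]) is the base pool's index, which is the
		 * same in pool_base_start and in each thread's cache array.
		 */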
		__pool_free(pool_base_start + (ph - pool_cache[tid]), item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}
#endif

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, **free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			activity[tid].pool_fail++;
			return NULL;
		}

		swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);

		ptr = pool_alloc_area(size + POOL_EXTRA);
		if (!ptr) {
			_HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

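		/* Push the new entry onto the lockless free list: spin while
		 * another thread holds the list BUSY, link the entry to the
		 * current head, then try to CAS the head to point to it.
		 */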
		free_list = _HA_ATOMIC_LOAD(&pool->free_list);
		do {
			while (unlikely(free_list == POOL_BUSY)) {
				pl_cpu_relax();
				free_list = _HA_ATOMIC_LOAD(&pool->free_list);
			}
			_HA_ATOMIC_STORE(POOL_LINK(pool, ptr), (void *)free_list);
			__ha_barrier_atomic_store();
		} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
		__ha_barrier_atomic_store();
	}
	__ha_barrier_atomic_store();

	_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	_HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void **next, *temp;
	int removed = 0;

	if (!pool)
		return;

	/* The loop below atomically detaches the head of the free list and
	 * replaces it with a NULL. Then the list can be released.
	 */
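	/* POOL_BUSY temporarily marks the head so that concurrent threads spin
	 * instead of observing a half-detached list.
	 */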
	next = pool->free_list;
	do {
		while (unlikely(next == POOL_BUSY)) {
			pl_cpu_relax();
			next = _HA_ATOMIC_LOAD(&pool->free_list);
		}
		if (next == NULL)
			return;
	} while (unlikely((next = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
	_HA_ATOMIC_STORE(&pool->free_list, NULL);
	__ha_barrier_atomic_store();

	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	_HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure it is the only
 * one running by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		void *temp;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		while (entry->free_list &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
	}

	if (!isolated)
		thread_release();
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

#ifdef DEBUG_FAIL_ALLOC
	if (mem_should_fail(pool))
		return NULL;
#endif
	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit) {
			activity[tid].pool_fail++;
			return NULL;
		}

		swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
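		/* the lock is released around the system allocation so that
		 * other threads waiting on the pool lock are not stalled
		 * behind malloc().
		 */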
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
#ifdef DEBUG_MEMORY_POOLS
		/* keep track of where the element was allocated from. This
		 * is done out of the lock so that the system really allocates
		 * the data without harming other threads waiting on the lock.
		 */
		if (ptr)
			*POOL_LINK(pool, ptr) = (void *)pool;
#endif
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		if (!ptr) {
			pool->failed++;
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
	return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp, **next;

	if (!pool)
		return;

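	/* Under the lock, the first pass below only adjusts the counters and
	 * detaches the free list; the areas themselves are returned to the
	 * system after unlocking.
	 */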
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool->allocated--;
	}

	next = pool->free_list;
	pool->free_list = NULL;
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);

	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure it is the only
 * one running by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		void *temp;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		while (entry->free_list &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
	}

	if (!isolated)
		thread_release();
}
#endif

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif

#ifdef CONFIG_HAP_LOCAL_POOLS
			if ((pool - pool_base_start) < MAX_BASE_POOLS)
				memset(pool, 0, sizeof(*pool));
			else
#endif
				free(pool);
		}
	}
	return NULL;
}

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
	struct pool_head *entry, *back;

	list_for_each_entry_safe(entry, back, &pools, list)
		pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, "  - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p=%02d%s\n",
			 entry->name, entry->size, entry->allocated,
		         entry->size * entry->allocated, entry->used,
		         swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
			 entry->users, entry, (int)pool_get_index(entry),
			 (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		 nbpools, allocated, used);
}

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_rx_room_blk(si);
		return 0;
	}
	return 1;
}

/* Callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
	*ptr = create_pool(name, size, MEM_F_SHARED);
	if (!*ptr) {
		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
			 name, size, strerror(errno));
		exit(1);
	}
}
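
/* Note: this callback is normally reached through the REGISTER_POOL() /
 * DECLARE_POOL() initcall macros from haproxy/pool.h rather than being
 * called directly, e.g. (hypothetical pool):
 *
 *	DECLARE_POOL(pool_head_example, "example", sizeof(struct example));
 */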

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
#ifdef CONFIG_HAP_LOCAL_POOLS
	int thr, idx;

	for (thr = 0; thr < MAX_THREADS; thr++) {
		for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
			LIST_INIT(&pool_cache[thr][idx].list);
			pool_cache[thr][idx].size = 0;
		}
		LIST_INIT(&ha_thread_info[thr].pool_lru_head);
	}
#endif
}

INITCALL0(STG_PREPARE, init_pools);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
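
/* Once registered, the report can be requested at runtime on the CLI/stats
 * socket, for instance (socket path hypothetical):
 *
 *	$ echo "show pools" | socat stdio /var/run/haproxy.sock
 */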

#ifdef DEBUG_FAIL_ALLOC
#define MEM_FAIL_MAX_CHAR 32
#define MEM_FAIL_MAX_STR 128
static int mem_fail_cur_idx;
static char mem_fail_str[MEM_FAIL_MAX_CHAR * MEM_FAIL_MAX_STR];
__decl_thread(static HA_SPINLOCK_T mem_fail_lock);

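/* Decides whether the current allocation from <pool> should artificially
 * fail: once startup is over, roughly mem_fail_rate percent of the calls
 * return 1. Each decision is also recorded in the mem_fail_str ring
 * (MEM_FAIL_MAX_STR fixed-width entries of MEM_FAIL_MAX_CHAR bytes).
 */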
int mem_should_fail(const struct pool_head *pool)
{
	int ret = 0;
	int n;

	if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
		int randnb = ha_random() % 100;

		if (mem_fail_rate > randnb)
			ret = 1;
		else
			ret = 0;
	}
	HA_SPIN_LOCK(POOL_LOCK, &mem_fail_lock);
	n = snprintf(&mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR],
	    MEM_FAIL_MAX_CHAR - 2,
	    "%d %.18s %d %d", mem_fail_cur_idx, pool->name, ret, tid);
	while (n < MEM_FAIL_MAX_CHAR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n++] = ' ';
	if (mem_fail_cur_idx < MEM_FAIL_MAX_STR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = '\n';
	else
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = 0;
	mem_fail_cur_idx++;
	if (mem_fail_cur_idx == MEM_FAIL_MAX_STR)
		mem_fail_cur_idx = 0;
	HA_SPIN_UNLOCK(POOL_LOCK, &mem_fail_lock);
	return ret;
}

/* config parser for global "tune.fail-alloc" */
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       struct proxy *defpx, const char *file, int line,
                                       char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;
	mem_fail_rate = atoi(args[1]);
	if (mem_fail_rate < 0 || mem_fail_rate > 100) {
		memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
		return -1;
	}
	return 0;
}
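
/* Example (in the global section of the configuration, only effective when
 * built with DEBUG_FAIL_ALLOC):
 *
 *	tune.fail-alloc 10
 *
 * makes roughly 10% of pool allocations fail once startup is complete.
 */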
#endif

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
	{ CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */