/*
** Zabbix
** Copyright (C) 2001-2021 Zabbix SIA
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
**/

#include "common.h"
#include "mutexs.h"
#include "log.h"

#include "memalloc.h"

/******************************************************************************
 *                                                                            *
 *                     Some information on memory layout                      *
 *                  ---------------------------------------                   *
 *                                                                            *
 *                                                                            *
 * (*) chunk: a contiguous piece of memory that is either free or used        *
 *                                                                            *
 *                                                                            *
 *                    +-------- size of + --------------+                     *
 *                    |       (8 bytes) |               |                     *
 *                    |                 v               |                     *
 *                    |                                 |                     *
 *                    |    +- allocatable memory --+    |                     *
 *                    |    | (user data goes here) |    |                     *
 *                    v    v                       v    v                     *
 *                                                                            *
 *                |--------|----------------...----|--------|                 *
 *                                                                            *
 *                ^        ^                       ^        ^                 *
 *            8-aligned    |                       |    8-aligned             *
 *                                                                            *
 *                     8-aligned               8-aligned                      *
 *                                                                            *
 *                                                                            *
 *     when a chunk is used, `size' fields have MEM_FLG_USED bit set          *
 *                                                                            *
 *     when a chunk is free, the first 2 * ZBX_PTR_SIZE bytes of allocatable  *
 *     memory contain pointers to the previous and next chunks, in that order *
 *                                                                            *
 *     notes:                                                                 *
 *                                                                            *
 *         - user data is nicely 8-aligned                                    *
 *                                                                            *
 *         - size is kept on both left and right ends for quick merging       *
 *           (when freeing a chunk, we can quickly see if the previous        *
 *           and next chunks are free, those will not have MEM_FLG_USED)      *
 *                                                                            *
 *                                                                            *
 * (*) free chunks are stored in doubly-linked lists according to their sizes *
 *                                                                            *
 *     a typical situation is thus as follows (1 used chunk, 2 free chunks)   *
 *                                                                            *
 *                                                                            *
 *  +--------------------------- shared memory ----------------------------+  *
 *  |                         (can be misaligned)                          |  *
 *  |                                                                      |  *
 *  |                                                                      |  *
 *  |  +------ chunk A ------+------ chunk B -----+------ chunk C ------+  |  *
 *  |  |       (free)        |       (used)       |       (free)        |  |  *
 *  |  |                     |                    |                     |  |  *
 *  v  v                     v                    v                     v  v  *
 *           prevnext              user data            prevnext              *
 *  #--|----|--------...|----|----|---....---|----|----|--------...|----|--#  *
 *           NULL  |                                     |  NULL              *
 *     ^           |                              ^      |              ^     *
 *     |           |                              |      |              |     *
 *     |           +------------------------------+      |              |     *
 *     |                                                 |              |     *
 *     +-------------------------------------------------+              |     *
 *     |                                                                |     *
 *                                                                            *
 *  lo_bound             `size' fields in chunk B                   hi_bound  *
 *  (aligned)            have MEM_FLG_USED bit set                 (aligned)  *
 *                                                                            *
 ******************************************************************************/
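
/* a worked example of the layout above (derived from the constants defined  */
/* below): a request for 100 bytes is rounded up by mem_proper_alloc_size()  */
/* to 104 bytes, so the resulting used chunk occupies 104 + 2 *              */
/* MEM_SIZE_FIELD = 120 bytes of shared memory; requests smaller than        */
/* MEM_MIN_ALLOC are rounded up to MEM_MIN_ALLOC (24) bytes                  */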

static void	*ALIGN4(void *ptr);
static void	*ALIGN8(void *ptr);
static void	*ALIGNPTR(void *ptr);

static zbx_uint64_t	mem_proper_alloc_size(zbx_uint64_t size);
static int	mem_bucket_by_size(zbx_uint64_t size);

static void	mem_set_chunk_size(void *chunk, zbx_uint64_t size);
static void	mem_set_used_chunk_size(void *chunk, zbx_uint64_t size);

static void	*mem_get_prev_chunk(void *chunk);
static void	mem_set_prev_chunk(void *chunk, void *prev);
static void	*mem_get_next_chunk(void *chunk);
static void	mem_set_next_chunk(void *chunk, void *next);
static void	**mem_ptr_to_prev_field(void *chunk);
static void	**mem_ptr_to_next_field(void *chunk, void **first_chunk);

static void	mem_link_chunk(zbx_mem_info_t *info, void *chunk);
static void	mem_unlink_chunk(zbx_mem_info_t *info, void *chunk);

static void	*__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size);
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size);
static void	__mem_free(zbx_mem_info_t *info, void *ptr);

#define MEM_SIZE_FIELD		sizeof(zbx_uint64_t)

#define MEM_FLG_USED		((__UINT64_C(1))<<63)

#define FREE_CHUNK(ptr)		(((*(zbx_uint64_t *)(ptr)) & MEM_FLG_USED) == 0)
#define CHUNK_SIZE(ptr)		((*(zbx_uint64_t *)(ptr)) & ~MEM_FLG_USED)

#define MEM_MIN_SIZE		__UINT64_C(128)
#define MEM_MAX_SIZE		__UINT64_C(0x1000000000)	/* 64 GB */

#define MEM_MIN_ALLOC	24	/* should be a multiple of 8 and at least (2 * ZBX_PTR_SIZE) */

#define MEM_MIN_BUCKET_SIZE	MEM_MIN_ALLOC
#define MEM_MAX_BUCKET_SIZE	256 /* starting from this size all free chunks are put into the same bucket */
#define MEM_BUCKET_COUNT	((MEM_MAX_BUCKET_SIZE - MEM_MIN_BUCKET_SIZE) / 8 + 1)

/* helper functions */

static void	*ALIGN4(void *ptr)
{
	return (void *)((uintptr_t)((char *)ptr + 3) & (uintptr_t)~3);
}

static void	*ALIGN8(void *ptr)
{
	return (void *)((uintptr_t)((char *)ptr + 7) & (uintptr_t)~7);
}

static void	*ALIGNPTR(void *ptr)
{
	if (4 == ZBX_PTR_SIZE)
		return ALIGN4(ptr);
	if (8 == ZBX_PTR_SIZE)
		return ALIGN8(ptr);
	assert(0);
}

static zbx_uint64_t	mem_proper_alloc_size(zbx_uint64_t size)
{
	if (size >= MEM_MIN_ALLOC)
		return size + ((8 - (size & 7)) & 7);	/* allocate in multiples of 8... */
	else
		return MEM_MIN_ALLOC;			/* ...and at least MEM_MIN_ALLOC */
}

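/* bucket mapping used below: free chunks of 24..31 bytes go to bucket 0,    */
/* 32..39 to bucket 1, and so on in 8-byte steps up to 248..255 in bucket    */
/* 28; chunks of MEM_MAX_BUCKET_SIZE (256) bytes or more all share the last  */
/* bucket, index MEM_BUCKET_COUNT - 1                                        */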
static int	mem_bucket_by_size(zbx_uint64_t size)
{
	if (size < MEM_MIN_BUCKET_SIZE)
		return 0;
	if (size < MEM_MAX_BUCKET_SIZE)
		return (size - MEM_MIN_BUCKET_SIZE) >> 3;
	return MEM_BUCKET_COUNT - 1;
}

static void	mem_set_chunk_size(void *chunk, zbx_uint64_t size)
{
	*(zbx_uint64_t *)chunk = size;
	*(zbx_uint64_t *)((char *)chunk + MEM_SIZE_FIELD + size) = size;
}

static void	mem_set_used_chunk_size(void *chunk, zbx_uint64_t size)
{
	*(zbx_uint64_t *)chunk = MEM_FLG_USED | size;
	*(zbx_uint64_t *)((char *)chunk + MEM_SIZE_FIELD + size) = MEM_FLG_USED | size;
}

static void	*mem_get_prev_chunk(void *chunk)
{
	return *(void **)((char *)chunk + MEM_SIZE_FIELD);
}

static void	mem_set_prev_chunk(void *chunk, void *prev)
{
	*(void **)((char *)chunk + MEM_SIZE_FIELD) = prev;
}

static void	*mem_get_next_chunk(void *chunk)
{
	return *(void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE);
}

static void	mem_set_next_chunk(void *chunk, void *next)
{
	*(void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE) = next;
}

static void	**mem_ptr_to_prev_field(void *chunk)
{
	return (NULL != chunk ? (void **)((char *)chunk + MEM_SIZE_FIELD) : NULL);
}

static void	**mem_ptr_to_next_field(void *chunk, void **first_chunk)
{
	return (NULL != chunk ? (void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE) : first_chunk);
}

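/* free chunks of one bucket form a doubly linked list; mem_link_chunk()     */
/* pushes a chunk at the head of the list for its size, mem_unlink_chunk()   */
/* removes it using the prev/next pointers stored inside the chunk itself    */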
static void	mem_link_chunk(zbx_mem_info_t *info, void *chunk)
{
	int	index;

	index = mem_bucket_by_size(CHUNK_SIZE(chunk));

	if (NULL != info->buckets[index])
		mem_set_prev_chunk(info->buckets[index], chunk);

	mem_set_prev_chunk(chunk, NULL);
	mem_set_next_chunk(chunk, info->buckets[index]);

	info->buckets[index] = chunk;
}

static void	mem_unlink_chunk(zbx_mem_info_t *info, void *chunk)
{
	int	index;
	void	*prev_chunk, *next_chunk;
	void	**next_in_prev_chunk, **prev_in_next_chunk;

	index = mem_bucket_by_size(CHUNK_SIZE(chunk));

	prev_chunk = mem_get_prev_chunk(chunk);
	next_chunk = mem_get_next_chunk(chunk);

	next_in_prev_chunk = mem_ptr_to_next_field(prev_chunk, &info->buckets[index]);
	prev_in_next_chunk = mem_ptr_to_prev_field(next_chunk);

	*next_in_prev_chunk = next_chunk;
	if (NULL != prev_in_next_chunk)
		*prev_in_next_chunk = prev_chunk;
}

/* private memory functions */

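/* allocation strategy: the requested size is rounded up by                     */
/* mem_proper_alloc_size() and served from the first non-empty bucket of        */
/* sufficiently large chunks; inside the last bucket (chunks of                 */
/* MEM_MAX_BUCKET_SIZE bytes or more) a linear first-fit search is used;        */
/* the chosen chunk is split only if the remainder can hold a minimal chunk     */
/* (MEM_MIN_ALLOC bytes plus two size fields), otherwise it is handed out whole */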
static void	*__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size)
{
	int		index;
	void		*chunk;
	zbx_uint64_t	chunk_size;

	size = mem_proper_alloc_size(size);

	/* try to find an appropriate chunk in special buckets */

	index = mem_bucket_by_size(size);

	while (index < MEM_BUCKET_COUNT - 1 && NULL == info->buckets[index])
		index++;

	chunk = info->buckets[index];

	if (index == MEM_BUCKET_COUNT - 1)
	{
		/* otherwise, find a chunk big enough according to first-fit strategy */

		int		counter = 0;
		zbx_uint64_t	skip_min = __UINT64_C(0xffffffffffffffff), skip_max = __UINT64_C(0);

		while (NULL != chunk && CHUNK_SIZE(chunk) < size)
		{
			counter++;
			skip_min = MIN(skip_min, CHUNK_SIZE(chunk));
			skip_max = MAX(skip_max, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		/* do not log errors when the allocator is allowed to return NULL */
		/* in low memory situations                                       */
		if (0 == info->allow_oom)
		{
			if (NULL == chunk)
			{
				zabbix_log(LOG_LEVEL_CRIT, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min "
						ZBX_FS_UI64 " skip_max " ZBX_FS_UI64,
						counter, size, skip_min, skip_max);
			}
			else if (counter >= 100)
			{
				zabbix_log(LOG_LEVEL_DEBUG, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min "
						ZBX_FS_UI64 " skip_max " ZBX_FS_UI64 " size " ZBX_FS_UI64, counter,
						size, skip_min, skip_max, CHUNK_SIZE(chunk));
			}
		}
	}

	if (NULL == chunk)
		return NULL;

	chunk_size = CHUNK_SIZE(chunk);
	mem_unlink_chunk(info, chunk);

	/* either use the full chunk or split it */

	if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
	{
		info->used_size += chunk_size;
		info->free_size -= chunk_size;

		mem_set_used_chunk_size(chunk, chunk_size);
	}
	else
	{
		void		*new_chunk;
		zbx_uint64_t	new_chunk_size;

		new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
		new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
		mem_set_chunk_size(new_chunk, new_chunk_size);
		mem_link_chunk(info, new_chunk);

		info->used_size += size;
		info->free_size -= chunk_size;
		info->free_size += new_chunk_size;

		mem_set_used_chunk_size(chunk, size);
	}

	return chunk;
}

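/* reallocation strategy: shrinking requests are satisfied in place (the excess */
/* is split off or merged into a following free chunk, but only when at least   */
/* three quarters of the chunk would be freed); growing requests first try to   */
/* expand into an adjacent free chunk, then fall back to allocate-copy-free;    */
/* as a last resort the data is staged in process heap memory so that freeing   */
/* the old chunk first can make enough room in shared memory                    */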
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge with next chunk */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one                */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;

		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}

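/* freeing a chunk coalesces it with its neighbours: the size fields duplicated */
/* at both ends of every chunk allow checking in constant time whether the      */
/* previous and next chunks are free, merging with them and relinking the       */
/* result into the bucket that matches its new size                             */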
static void	__mem_free(zbx_mem_info_t *info, void *ptr)
{
	void		*chunk;
	void		*prev_chunk, *next_chunk;
	zbx_uint64_t	chunk_size;
	int		prev_free, next_free;

	chunk = (void *)((char *)ptr - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	info->used_size -= chunk_size;
	info->free_size += chunk_size;

	/* see if we can merge with previous and next chunks */

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);

	prev_free = (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD));
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (prev_free && next_free)
	{
		info->free_size += 4 * MEM_SIZE_FIELD;

		prev_chunk = (char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD;

		chunk_size += 4 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk) + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, prev_chunk);
		mem_unlink_chunk(info, next_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (prev_free)
	{
		info->free_size += 2 * MEM_SIZE_FIELD;

		prev_chunk = (void *)((char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD);

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk);

		mem_unlink_chunk(info, prev_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (next_free)
	{
		info->free_size += 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else
	{
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
}

/* public memory interface */

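/* typical lifecycle (a sketch with illustrative descr/param values; callers   */
/* are normally expected to go through the wrapper macros in memalloc.h that   */
/* supply __FILE__ and __LINE__ rather than calling the double-underscore      */
/* functions directly):                                                        */
/*                                                                              */
/*     zbx_mem_info_t *mem = NULL;                                              */
/*     char           *error = NULL;                                            */
/*                                                                              */
/*     if (SUCCEED != zbx_mem_create(&mem, 512 * 1024, "example cache",         */
/*             "ExampleCacheSize", 0, &error))                                  */
/*         ...report 'error' and bail out...                                    */
/*                                                                              */
/*     data = __zbx_mem_malloc(__FILE__, __LINE__, mem, NULL, nbytes);          */
/*     data = __zbx_mem_realloc(__FILE__, __LINE__, mem, data, 2 * nbytes);     */
/*     __zbx_mem_free(__FILE__, __LINE__, mem, data);                           */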
int	zbx_mem_create(zbx_mem_info_t **info, zbx_uint64_t size, const char *descr, const char *param, int allow_oom,
		char **error)
{
	const char		*__function_name = "zbx_mem_create";

	int			shm_id, index, ret = FAIL;
	void			*base;

	descr = ZBX_NULL2STR(descr);
	param = ZBX_NULL2STR(param);

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() param:'%s' size:" ZBX_FS_SIZE_T, __function_name, param,
			(zbx_fs_size_t)size);

	/* allocate shared memory */

	if (4 != ZBX_PTR_SIZE && 8 != ZBX_PTR_SIZE)
	{
		*error = zbx_dsprintf(*error, "failed assumption about pointer size (" ZBX_FS_SIZE_T " not in {4, 8})",
				(zbx_fs_size_t)ZBX_PTR_SIZE);
		goto out;
	}

	if (!(MEM_MIN_SIZE <= size && size <= MEM_MAX_SIZE))
	{
		*error = zbx_dsprintf(*error, "requested size " ZBX_FS_SIZE_T " not within bounds [" ZBX_FS_UI64
				" <= size <= " ZBX_FS_UI64 "]", (zbx_fs_size_t)size, MEM_MIN_SIZE, MEM_MAX_SIZE);
		goto out;
	}

	if (-1 == (shm_id = shmget(IPC_PRIVATE, size, 0600)))
	{
		*error = zbx_dsprintf(*error, "cannot get private shared memory of size " ZBX_FS_SIZE_T " for %s: %s",
				(zbx_fs_size_t)size, descr, zbx_strerror(errno));
		goto out;
	}

	if ((void *)(-1) == (base = shmat(shm_id, NULL, 0)))
	{
		*error = zbx_dsprintf(*error, "cannot attach shared memory for %s: %s", descr, zbx_strerror(errno));
		goto out;
	}

	if (-1 == shmctl(shm_id, IPC_RMID, NULL))
		zbx_error("cannot mark shared memory %d for destruction: %s", shm_id, zbx_strerror(errno));

	ret = SUCCEED;

	/* allocate zbx_mem_info_t structure, its buckets, and description inside shared memory */

	*info = (zbx_mem_info_t *)ALIGN8(base);
	(*info)->shm_id = shm_id;
	(*info)->orig_size = size;
	size -= (char *)(*info + 1) - (char *)base;

	base = (void *)(*info + 1);

	(*info)->buckets = (void **)ALIGNPTR(base);
	memset((*info)->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);
	size -= (char *)((*info)->buckets + MEM_BUCKET_COUNT) - (char *)base;
	base = (void *)((*info)->buckets + MEM_BUCKET_COUNT);

	zbx_strlcpy((char *)base, descr, size);
	(*info)->mem_descr = (char *)base;
	size -= strlen(descr) + 1;
	base = (void *)((char *)base + strlen(descr) + 1);

	zbx_strlcpy((char *)base, param, size);
	(*info)->mem_param = (char *)base;
	size -= strlen(param) + 1;
	base = (void *)((char *)base + strlen(param) + 1);

	(*info)->allow_oom = allow_oom;

	/* prepare shared memory for further allocation by creating one big chunk */
	(*info)->lo_bound = ALIGN8(base);
	(*info)->hi_bound = ALIGN8((char *)base + size - 8);

	(*info)->total_size = (zbx_uint64_t)((char *)((*info)->hi_bound) - (char *)((*info)->lo_bound) -
			2 * MEM_SIZE_FIELD);

	index = mem_bucket_by_size((*info)->total_size);
	(*info)->buckets[index] = (*info)->lo_bound;
	mem_set_chunk_size((*info)->buckets[index], (*info)->total_size);
	mem_set_prev_chunk((*info)->buckets[index], NULL);
	mem_set_next_chunk((*info)->buckets[index], NULL);

	(*info)->used_size = 0;
	(*info)->free_size = (*info)->total_size;

	zabbix_log(LOG_LEVEL_DEBUG, "valid user addresses: [%p, %p] total size: " ZBX_FS_SIZE_T,
			(void *)((char *)(*info)->lo_bound + MEM_SIZE_FIELD),
			(void *)((char *)(*info)->hi_bound - MEM_SIZE_FIELD),
			(zbx_fs_size_t)(*info)->total_size);
out:
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);

	return ret;
}

void	*__zbx_mem_malloc(const char *file, int line, zbx_mem_info_t *info, const void *old, size_t size)
{
	const char	*__function_name = "__zbx_mem_malloc";

	void		*chunk;

	if (NULL != old)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): allocating already allocated memory",
				file, line, __function_name);
		exit(EXIT_FAILURE);
	}

	if (0 == size || size > MEM_MAX_SIZE)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): asking for a bad number of bytes (" ZBX_FS_SIZE_T
				")", file, line, __function_name, (zbx_fs_size_t)size);
		exit(EXIT_FAILURE);
	}

	chunk = __mem_malloc(info, size);

	if (NULL == chunk)
	{
		if (1 == info->allow_oom)
			return NULL;

		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): out of memory (requested " ZBX_FS_SIZE_T " bytes)",
				file, line, __function_name, (zbx_fs_size_t)size);
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): please increase %s configuration parameter",
				file, line, __function_name, info->mem_param);
		zbx_mem_dump_stats(LOG_LEVEL_CRIT, info);
		zbx_backtrace();
		exit(EXIT_FAILURE);
	}

	return (void *)((char *)chunk + MEM_SIZE_FIELD);
}

void	*__zbx_mem_realloc(const char *file, int line, zbx_mem_info_t *info, void *old, size_t size)
{
	const char	*__function_name = "__zbx_mem_realloc";

	void		*chunk;

	if (0 == size || size > MEM_MAX_SIZE)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): asking for a bad number of bytes (" ZBX_FS_SIZE_T
				")", file, line, __function_name, (zbx_fs_size_t)size);
		exit(EXIT_FAILURE);
	}

	if (NULL == old)
		chunk = __mem_malloc(info, size);
	else
		chunk = __mem_realloc(info, old, size);

	if (NULL == chunk)
	{
		if (1 == info->allow_oom)
			return NULL;

		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): out of memory (requested " ZBX_FS_SIZE_T " bytes)",
				file, line, __function_name, (zbx_fs_size_t)size);
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): please increase %s configuration parameter",
				file, line, __function_name, info->mem_param);
		zbx_mem_dump_stats(LOG_LEVEL_CRIT, info);
		zbx_backtrace();
		exit(EXIT_FAILURE);
	}

	return (void *)((char *)chunk + MEM_SIZE_FIELD);
}

void	__zbx_mem_free(const char *file, int line, zbx_mem_info_t *info, void *ptr)
{
	const char	*__function_name = "__zbx_mem_free";

	if (NULL == ptr)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): freeing a NULL pointer",
				file, line, __function_name);
		exit(EXIT_FAILURE);
	}

	__mem_free(info, ptr);
}

void	zbx_mem_clear(zbx_mem_info_t *info)
{
	const char	*__function_name = "zbx_mem_clear";

	int		index;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __function_name);

	memset(info->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);
	index = mem_bucket_by_size(info->total_size);
	info->buckets[index] = info->lo_bound;
	mem_set_chunk_size(info->buckets[index], info->total_size);
	mem_set_prev_chunk(info->buckets[index], NULL);
	mem_set_next_chunk(info->buckets[index], NULL);
	info->used_size = 0;
	info->free_size = info->total_size;

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);
}

void	zbx_mem_dump_stats(int level, zbx_mem_info_t *info)
{
	void		*chunk;
	int		index;
	zbx_uint64_t	counter, total, overhead, total_free = 0;
	zbx_uint64_t	min_size = __UINT64_C(0xffffffffffffffff), max_size = __UINT64_C(0);

	zabbix_log(level, "=== memory statistics for %s ===", info->mem_descr);

	for (index = 0; index < MEM_BUCKET_COUNT; index++)
	{
		counter = 0;
		chunk = info->buckets[index];

		while (NULL != chunk)
		{
			counter++;
			min_size = MIN(min_size, CHUNK_SIZE(chunk));
			max_size = MAX(max_size, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		if (counter > 0)
		{
			total_free += counter;
			zabbix_log(level, "free chunks of size %2s %3d bytes: %8llu",
					index == MEM_BUCKET_COUNT - 1 ? ">=" : "",
					MEM_MIN_BUCKET_SIZE + 8 * index, (unsigned long long)counter);
		}
	}

	zabbix_log(level, "min chunk size: %10llu bytes", (unsigned long long)min_size);
	zabbix_log(level, "max chunk size: %10llu bytes", (unsigned long long)max_size);

	overhead = info->total_size - info->used_size - info->free_size;
	total = overhead / (2 * MEM_SIZE_FIELD) + 1;
	zabbix_log(level, "memory of total size %llu bytes fragmented into %llu chunks",
			(unsigned long long)info->total_size, (unsigned long long)total);
	zabbix_log(level, "of those, %10llu bytes are in %8llu free chunks",
			(unsigned long long)info->free_size, (unsigned long long)total_free);
	zabbix_log(level, "of those, %10llu bytes are in %8llu used chunks",
			(unsigned long long)info->used_size, (unsigned long long)(total - total_free));
	zabbix_log(level, "of those, %10llu bytes are used by allocation overhead", (unsigned long long)overhead);

	zabbix_log(level, "================================");
}

size_t	zbx_mem_required_size(int chunks_num, const char *descr, const char *param)
{
	const char	*__function_name = "zbx_mem_required_size";

	size_t		size = 0;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() size:" ZBX_FS_SIZE_T " chunks_num:%d descr:'%s' param:'%s'",
			__function_name, (zbx_fs_size_t)size, chunks_num, descr, param);

	/* shared memory of what size should we allocate so that there is a guarantee  */
	/* that we will be able to get ourselves 'chunks_num' pieces of memory with a  */
	/* total size of 'size', given that we also have to store 'descr' and 'param'? */

	size += 7;					/* ensure we allocate enough to 8-align zbx_mem_info_t */
	size += sizeof(zbx_mem_info_t);
	size += ZBX_PTR_SIZE - 1;			/* ensure we allocate enough to align bucket pointers */
	size += ZBX_PTR_SIZE * MEM_BUCKET_COUNT;
	size += strlen(descr) + 1;
	size += strlen(param) + 1;
	size += (MEM_SIZE_FIELD - 1) + 8;		/* ensure we allocate enough to align the first chunk */
	size += (MEM_SIZE_FIELD - 1) + 8;		/* ensure we allocate enough to align right size field */

	size += (chunks_num - 1) * MEM_SIZE_FIELD * 2;	/* each additional chunk requires 16 bytes of overhead */
	size += chunks_num * (MEM_MIN_ALLOC - 1);	/* each chunk has size of at least MEM_MIN_ALLOC bytes */

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s() size:" ZBX_FS_SIZE_T, __function_name, (zbx_fs_size_t)size);

	return size;
}

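/* for a single allocation the cost in shared memory is easy to compute: e.g. a */
/* request of 10 user bytes is rounded up to MEM_MIN_ALLOC (24) bytes and needs */
/* two 8-byte size fields on top of that, so zbx_mem_required_chunk_size(10)    */
/* below returns 40                                                             */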
zbx_uint64_t	zbx_mem_required_chunk_size(zbx_uint64_t size)
{
	if (0 == size)
		return 0;

	return mem_proper_alloc_size(size) + MEM_SIZE_FIELD * 2;
}