1 /*
2 ** Zabbix
3 ** Copyright (C) 2001-2021 Zabbix SIA
4 **
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
9 **
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
14 **
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 **/
19
20 #include "common.h"
21 #include "mutexs.h"
22 #include "log.h"
23
24 #include "memalloc.h"
25
26 /******************************************************************************
27 * *
28 * Some information on memory layout *
29 * --------------------------------------- *
30 * *
31 * *
32 * (*) chunk: a contiguous piece of memory that is either free or used *
33 * *
34 * *
35 * +-------- size of + --------------+ *
36 * | (8 bytes) | | *
37 * | v | *
38 * | | *
39 * | +- allocatable memory --+ | *
40 * | | (user data goes here) | | *
41 * v v v v *
42 * *
43 * |--------|----------------...----|--------| *
44 * *
45 * ^ ^ ^ ^ *
46 * 8-aligned | | 8-aligned *
47 * *
48 * 8-aligned 8-aligned *
49 * *
50 * *
51 * when a chunk is used, `size' fields have MEM_FLG_USED bit set *
52 * *
53 * when a chunk is free, the first 2 * ZBX_PTR_SIZE bytes of allocatable *
54 * memory contain pointers to the previous and next chunks, in that order *
55 * *
56 * notes: *
57 * *
58 * - user data is nicely 8-aligned *
59 * *
60 * - size is kept on both left and right ends for quick merging *
61 * (when freeing a chunk, we can quickly see if the previous *
62 * and next chunks are free, those will not have MEM_FLG_USED) *
63 * *
64 * *
65 * (*) free chunks are stored in doubly-linked lists according to their sizes *
66 * *
67 * a typical situation is thus as follows (1 used chunk, 2 free chunks) *
68 * *
69 * *
70 * +--------------------------- shared memory ----------------------------+ *
71 * | (can be misaligned) | *
72 * | | *
73 * | | *
74 * | +------ chunk A ------+------ chunk B -----+------ chunk C ------+ | *
75 * | | (free) | (used) | (free) | | *
76 * | | | | | | *
77 * v v v v v v *
78 * prevnext user data prevnext *
79 * #--|----|--------...|----|----|---....---|----|----|--------...|----|--# *
80 * NULL | | NULL *
81 * ^ | ^ | ^ *
82 * | | | | | *
83 * | +------------------------------+ | | *
84 * | | | *
85 * +-------------------------------------------------+ | *
86 * | | *
87 * *
88 * lo_bound `size' fields in chunk B hi_bound *
89 * (aligned) have MEM_FLG_USED bit set (aligned) *
90 * *
91 ******************************************************************************/
92
/* pointer alignment helpers (implementations below) */
static void *ALIGN4(void *ptr);
static void *ALIGN8(void *ptr);
static void *ALIGNPTR(void *ptr);

/* allocation size rounding and free-list bucket selection */
static zbx_uint64_t mem_proper_alloc_size(zbx_uint64_t size);
static int mem_bucket_by_size(zbx_uint64_t size);

/* writers for the size fields kept at both ends of a chunk (free/used variants) */
static void mem_set_chunk_size(void *chunk, zbx_uint64_t size);
static void mem_set_used_chunk_size(void *chunk, zbx_uint64_t size);

/* accessors for the prev/next pointers stored inside the user area of free chunks */
static void *mem_get_prev_chunk(void *chunk);
static void mem_set_prev_chunk(void *chunk, void *prev);
static void *mem_get_next_chunk(void *chunk);
static void mem_set_next_chunk(void *chunk, void *next);
static void **mem_ptr_to_prev_field(void *chunk);
static void **mem_ptr_to_next_field(void *chunk, void **first_chunk);

/* doubly-linked free list maintenance, one list per size bucket */
static void mem_link_chunk(zbx_mem_info_t *info, void *chunk);
static void mem_unlink_chunk(zbx_mem_info_t *info, void *chunk);

/* core allocator primitives operating on chunk headers */
static void *__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size);
static void *__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size);
static void __mem_free(zbx_mem_info_t *info, void *ptr);

/* width of one chunk size field (a chunk carries one at each end) */
#define MEM_SIZE_FIELD sizeof(zbx_uint64_t)

/* top bit of a size field marks the chunk as used */
#define MEM_FLG_USED ((__UINT64_C(1))<<63)

#define FREE_CHUNK(ptr) (((*(zbx_uint64_t *)(ptr)) & MEM_FLG_USED) == 0)
#define CHUNK_SIZE(ptr) ((*(zbx_uint64_t *)(ptr)) & ~MEM_FLG_USED)

/* bounds on the total shared memory size accepted by zbx_mem_create() */
#define MEM_MIN_SIZE __UINT64_C(128)
#define MEM_MAX_SIZE __UINT64_C(0x1000000000) /* 64 GB */
126
127 /* helper functions */
128
/* round 'ptr' up to the nearest multiple of 4 bytes */
static void *ALIGN4(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	addr = (addr + 3) & ~(uintptr_t)3;

	return (void *)addr;
}
133
/* round 'ptr' up to the nearest multiple of 8 bytes */
static void *ALIGN8(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	addr = (addr + 7) & ~(uintptr_t)7;

	return (void *)addr;
}
138
ALIGNPTR(void * ptr)139 static void *ALIGNPTR(void *ptr)
140 {
141 if (4 == ZBX_PTR_SIZE)
142 return ALIGN4(ptr);
143 if (8 == ZBX_PTR_SIZE)
144 return ALIGN8(ptr);
145 assert(0);
146 }
147
mem_proper_alloc_size(zbx_uint64_t size)148 static zbx_uint64_t mem_proper_alloc_size(zbx_uint64_t size)
149 {
150 if (size >= MEM_MIN_ALLOC)
151 return size + ((8 - (size & 7)) & 7); /* allocate in multiples of 8... */
152 else
153 return MEM_MIN_ALLOC; /* ...and at least MEM_MIN_ALLOC */
154 }
155
/* map a chunk size to its free-list bucket index; the last bucket */
/* collects all chunks of MEM_MAX_BUCKET_SIZE bytes and above      */
static int mem_bucket_by_size(zbx_uint64_t size)
{
	if (size < MEM_MIN_BUCKET_SIZE)
		return 0;

	if (size >= MEM_MAX_BUCKET_SIZE)
		return MEM_BUCKET_COUNT - 1;

	/* buckets between the bounds are spaced 8 bytes apart */
	return (int)((size - MEM_MIN_BUCKET_SIZE) >> 3);
}
164
/* mark 'chunk' as free with the given size by writing 'size'         */
/* (MEM_FLG_USED clear) into both the leading and trailing size field */
static void mem_set_chunk_size(void *chunk, zbx_uint64_t size)
{
	zbx_uint64_t *head = (zbx_uint64_t *)chunk;
	zbx_uint64_t *tail = (zbx_uint64_t *)((char *)chunk + MEM_SIZE_FIELD + size);

	*head = size;
	*tail = size;
}
170
/* mark 'chunk' as used with the given size by writing 'size' with the */
/* MEM_FLG_USED bit set into both the leading and trailing size field  */
static void mem_set_used_chunk_size(void *chunk, zbx_uint64_t size)
{
	zbx_uint64_t marked = MEM_FLG_USED | size;
	zbx_uint64_t *head = (zbx_uint64_t *)chunk;
	zbx_uint64_t *tail = (zbx_uint64_t *)((char *)chunk + MEM_SIZE_FIELD + size);

	*head = marked;
	*tail = marked;
}
176
mem_get_prev_chunk(void * chunk)177 static void *mem_get_prev_chunk(void *chunk)
178 {
179 return *(void **)((char *)chunk + MEM_SIZE_FIELD);
180 }
181
mem_set_prev_chunk(void * chunk,void * prev)182 static void mem_set_prev_chunk(void *chunk, void *prev)
183 {
184 *(void **)((char *)chunk + MEM_SIZE_FIELD) = prev;
185 }
186
mem_get_next_chunk(void * chunk)187 static void *mem_get_next_chunk(void *chunk)
188 {
189 return *(void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE);
190 }
191
mem_set_next_chunk(void * chunk,void * next)192 static void mem_set_next_chunk(void *chunk, void *next)
193 {
194 *(void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE) = next;
195 }
196
mem_ptr_to_prev_field(void * chunk)197 static void **mem_ptr_to_prev_field(void *chunk)
198 {
199 return (NULL != chunk ? (void **)((char *)chunk + MEM_SIZE_FIELD) : NULL);
200 }
201
mem_ptr_to_next_field(void * chunk,void ** first_chunk)202 static void **mem_ptr_to_next_field(void *chunk, void **first_chunk)
203 {
204 return (NULL != chunk ? (void **)((char *)chunk + MEM_SIZE_FIELD + ZBX_PTR_SIZE) : first_chunk);
205 }
206
/* insert a free chunk at the head of the doubly-linked free list */
/* of the bucket matching its size                                */
static void mem_link_chunk(zbx_mem_info_t *info, void *chunk)
{
	int index;

	index = mem_bucket_by_size(CHUNK_SIZE(chunk));

	/* old head (if any) gets the new chunk as its predecessor */
	if (NULL != info->buckets[index])
		mem_set_prev_chunk(info->buckets[index], chunk);

	mem_set_prev_chunk(chunk, NULL);
	mem_set_next_chunk(chunk, info->buckets[index]);

	info->buckets[index] = chunk;
}
221
/* remove a free chunk from its bucket's doubly-linked free list; */
/* handles the head-of-list case via mem_ptr_to_next_field(),     */
/* which returns the bucket head slot when there is no previous   */
static void mem_unlink_chunk(zbx_mem_info_t *info, void *chunk)
{
	int index;
	void *prev_chunk, *next_chunk;
	void **next_in_prev_chunk, **prev_in_next_chunk;

	index = mem_bucket_by_size(CHUNK_SIZE(chunk));

	prev_chunk = mem_get_prev_chunk(chunk);
	next_chunk = mem_get_next_chunk(chunk);

	/* slot that currently points at 'chunk' from the left (predecessor or bucket head) */
	next_in_prev_chunk = mem_ptr_to_next_field(prev_chunk, &info->buckets[index]);
	/* slot that points back at 'chunk' from the right, or NULL if chunk is last */
	prev_in_next_chunk = mem_ptr_to_prev_field(next_chunk);

	*next_in_prev_chunk = next_chunk;
	if (NULL != prev_in_next_chunk)
		*prev_in_next_chunk = prev_chunk;
}
240
241 /* private memory functions */
242
/******************************************************************************
 *                                                                            *
 * Purpose: allocate a chunk of at least 'size' bytes from the shared memory  *
 *          segment described by 'info'                                       *
 *                                                                            *
 * Return value: pointer to the chunk header (leading size field), or NULL    *
 *               if no sufficiently large free chunk is available             *
 *                                                                            *
 ******************************************************************************/
static void *__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size)
{
	int index;
	void *chunk;
	zbx_uint64_t chunk_size;

	size = mem_proper_alloc_size(size);

	/* try to find an appropriate chunk in special buckets */

	index = mem_bucket_by_size(size);

	/* chunks in non-last buckets are always big enough, so take */
	/* the first non-empty bucket at or above the requested size */
	while (index < MEM_BUCKET_COUNT - 1 && NULL == info->buckets[index])
		index++;

	chunk = info->buckets[index];

	if (index == MEM_BUCKET_COUNT - 1)
	{
		/* otherwise, find a chunk big enough according to first-fit strategy */

		int counter = 0;
		zbx_uint64_t skip_min = __UINT64_C(0xffffffffffffffff), skip_max = __UINT64_C(0);

		while (NULL != chunk && CHUNK_SIZE(chunk) < size)
		{
			counter++;
			skip_min = MIN(skip_min, CHUNK_SIZE(chunk));
			skip_max = MAX(skip_max, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		/* don't log errors if malloc can return null in low memory situations */
		if (0 == info->allow_oom)
		{
			if (NULL == chunk)
			{
				zabbix_log(LOG_LEVEL_CRIT, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min "
						ZBX_FS_UI64 " skip_max " ZBX_FS_UI64,
						counter, size, skip_min, skip_max);
			}
			else if (counter >= 100)
			{
				/* long scans of the last bucket indicate fragmentation - worth a debug note */
				zabbix_log(LOG_LEVEL_DEBUG, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min "
						ZBX_FS_UI64 " skip_max " ZBX_FS_UI64 " size " ZBX_FS_UI64, counter,
						size, skip_min, skip_max, CHUNK_SIZE(chunk));
			}
		}
	}

	if (NULL == chunk)
		return NULL;

	chunk_size = CHUNK_SIZE(chunk);
	mem_unlink_chunk(info, chunk);

	/* either use the full chunk or split it */

	if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
	{
		/* remainder would be too small to form a chunk of its own - hand out the whole chunk */
		info->used_size += chunk_size;
		info->free_size -= chunk_size;

		mem_set_used_chunk_size(chunk, chunk_size);
	}
	else
	{
		/* split off the tail as a new free chunk (costs 2 size fields of overhead) */
		void *new_chunk;
		zbx_uint64_t new_chunk_size;

		new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
		new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
		mem_set_chunk_size(new_chunk, new_chunk_size);
		mem_link_chunk(info, new_chunk);

		info->used_size += size;
		info->free_size -= chunk_size;
		info->free_size += new_chunk_size;

		mem_set_used_chunk_size(chunk, size);
	}

	return chunk;
}
327
/******************************************************************************
 *                                                                            *
 * Purpose: resize an allocation; 'old' is the user pointer (chunk header is  *
 *          MEM_SIZE_FIELD bytes before it)                                   *
 *                                                                            *
 * Return value: pointer to the (possibly moved) chunk header, or NULL if     *
 *               growing failed and there is not enough memory even after     *
 *               freeing the current chunk                                    *
 *                                                                            *
 ******************************************************************************/
static void *__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void *chunk, *new_chunk, *next_chunk;
	zbx_uint64_t chunk_size, new_chunk_size;
	int next_free;

	size = mem_proper_alloc_size(size);

	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* shrinking: do not reallocate if not much is freed */
		/* we are likely to want more memory again           */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* give the freed tail to the adjacent free chunk (merge) */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current chunk; the new free tail needs its own 2 size fields */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	/* growing: try to expand in place into the adjacent free chunk */
	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		/* absorbing next_chunk also reclaims the 2 size fields between the chunks */
		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		/* allocate elsewhere and copy the old user data over */
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void *tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one                */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;

		/* stash the user data in process-local heap memory while the */
		/* shared memory chunk is freed and a bigger one is allocated */
		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			/* the space computation above guarantees this allocation succeeds */
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}
463
/******************************************************************************
 *                                                                            *
 * Purpose: free the chunk whose user pointer is 'ptr', merging it with       *
 *          adjacent free chunks (coalescing) before relinking it into the    *
 *          appropriate free-list bucket                                      *
 *                                                                            *
 ******************************************************************************/
static void __mem_free(zbx_mem_info_t *info, void *ptr)
{
	void *chunk;
	void *prev_chunk, *next_chunk;
	zbx_uint64_t chunk_size;
	int prev_free, next_free;

	chunk = (void *)((char *)ptr - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	info->used_size -= chunk_size;
	info->free_size += chunk_size;

	/* see if we can merge with previous and next chunks */

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);

	/* the trailing size field of the previous chunk sits just before this chunk, */
	/* so its free/used state can be read without knowing where it starts         */
	prev_free = (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD));
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (prev_free && next_free)
	{
		/* merging three chunks reclaims 2 pairs of size fields */
		info->free_size += 4 * MEM_SIZE_FIELD;

		prev_chunk = (char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD;

		chunk_size += 4 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk) + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, prev_chunk);
		mem_unlink_chunk(info, next_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (prev_free)
	{
		/* merging two chunks reclaims 1 pair of size fields */
		info->free_size += 2 * MEM_SIZE_FIELD;

		prev_chunk = (void *)((char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD);

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk);

		mem_unlink_chunk(info, prev_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (next_free)
	{
		info->free_size += 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else
	{
		/* no neighbors to merge with - just mark free and relink */
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
}
532
533 /* public memory interface */
534
/******************************************************************************
 *                                                                            *
 * Purpose: create a private System V shared memory segment of 'size' bytes   *
 *          and carve out of it: the zbx_mem_info_t bookkeeping structure,    *
 *          the bucket pointer array, the description/parameter strings, and  *
 *          one big free chunk covering the rest                              *
 *                                                                            *
 * Parameters: info      - [OUT] allocator state (placed inside the segment)  *
 *             size      - [IN] requested segment size in bytes               *
 *             descr     - [IN] human-readable description (may be NULL)      *
 *             param     - [IN] configuration parameter name (may be NULL)    *
 *             allow_oom - [IN] if nonzero, allocations may return NULL       *
 *                         instead of terminating the process                 *
 *             error     - [OUT] error message on failure                     *
 *                                                                            *
 * Return value: SUCCEED or FAIL                                              *
 *                                                                            *
 ******************************************************************************/
int zbx_mem_create(zbx_mem_info_t **info, zbx_uint64_t size, const char *descr, const char *param, int allow_oom,
		char **error)
{
	int shm_id, index, ret = FAIL;
	void *base;

	descr = ZBX_NULL2STR(descr);
	param = ZBX_NULL2STR(param);

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() param:'%s' size:" ZBX_FS_SIZE_T, __func__, param, (zbx_fs_size_t)size);

	/* allocate shared memory */

	if (4 != ZBX_PTR_SIZE && 8 != ZBX_PTR_SIZE)
	{
		*error = zbx_dsprintf(*error, "failed assumption about pointer size (" ZBX_FS_SIZE_T " not in {4, 8})",
				(zbx_fs_size_t)ZBX_PTR_SIZE);
		goto out;
	}

	if (!(MEM_MIN_SIZE <= size && size <= MEM_MAX_SIZE))
	{
		*error = zbx_dsprintf(*error, "requested size " ZBX_FS_SIZE_T " not within bounds [" ZBX_FS_UI64
				" <= size <= " ZBX_FS_UI64 "]", (zbx_fs_size_t)size, MEM_MIN_SIZE, MEM_MAX_SIZE);
		goto out;
	}

	if (-1 == (shm_id = shmget(IPC_PRIVATE, size, 0600)))
	{
		*error = zbx_dsprintf(*error, "cannot get private shared memory of size " ZBX_FS_SIZE_T " for %s: %s",
				(zbx_fs_size_t)size, descr, zbx_strerror(errno));
		goto out;
	}

	if ((void *)(-1) == (base = shmat(shm_id, NULL, 0)))
	{
		*error = zbx_dsprintf(*error, "cannot attach shared memory for %s: %s", descr, zbx_strerror(errno));
		goto out;
	}

	/* mark for removal now so the segment disappears automatically */
	/* once the last process detaches                               */
	if (-1 == shmctl(shm_id, IPC_RMID, NULL))
		zbx_error("cannot mark shared memory %d for destruction: %s", shm_id, zbx_strerror(errno));

	ret = SUCCEED;

	/* allocate zbx_mem_info_t structure, its buckets, and description inside shared memory */

	*info = (zbx_mem_info_t *)ALIGN8(base);
	(*info)->shm_id = shm_id;
	(*info)->orig_size = size;
	size -= (char *)(*info + 1) - (char *)base;

	base = (void *)(*info + 1);

	(*info)->buckets = (void **)ALIGNPTR(base);
	memset((*info)->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);
	size -= (char *)((*info)->buckets + MEM_BUCKET_COUNT) - (char *)base;
	base = (void *)((*info)->buckets + MEM_BUCKET_COUNT);

	zbx_strlcpy((char *)base, descr, size);
	(*info)->mem_descr = (char *)base;
	size -= strlen(descr) + 1;
	base = (void *)((char *)base + strlen(descr) + 1);

	zbx_strlcpy((char *)base, param, size);
	(*info)->mem_param = (char *)base;
	size -= strlen(param) + 1;
	base = (void *)((char *)base + strlen(param) + 1);

	(*info)->allow_oom = allow_oom;

	/* prepare shared memory for further allocation by creating one big chunk */
	(*info)->lo_bound = ALIGN8(base);
	(*info)->hi_bound = ALIGN8((char *)base + size - 8);

	(*info)->total_size = (zbx_uint64_t)((char *)((*info)->hi_bound) - (char *)((*info)->lo_bound) -
			2 * MEM_SIZE_FIELD);

	index = mem_bucket_by_size((*info)->total_size);
	(*info)->buckets[index] = (*info)->lo_bound;
	mem_set_chunk_size((*info)->buckets[index], (*info)->total_size);
	mem_set_prev_chunk((*info)->buckets[index], NULL);
	mem_set_next_chunk((*info)->buckets[index], NULL);

	(*info)->used_size = 0;
	(*info)->free_size = (*info)->total_size;

	zabbix_log(LOG_LEVEL_DEBUG, "valid user addresses: [%p, %p] total size: " ZBX_FS_SIZE_T,
			(void *)((char *)(*info)->lo_bound + MEM_SIZE_FIELD),
			(void *)((char *)(*info)->hi_bound - MEM_SIZE_FIELD),
			(zbx_fs_size_t)(*info)->total_size);
out:
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);

	return ret;
}
631
/******************************************************************************
 *                                                                            *
 * Purpose: public malloc wrapper; validates arguments, delegates to          *
 *          __mem_malloc() and converts the chunk header pointer into a user  *
 *          pointer; terminates the process on misuse or (unless allow_oom)   *
 *          on memory exhaustion                                              *
 *                                                                            *
 * Parameters: file, line - [IN] caller location for diagnostics              *
 *             old        - [IN] must be NULL (guards against double alloc)   *
 *                                                                            *
 ******************************************************************************/
void *__zbx_mem_malloc(const char *file, int line, zbx_mem_info_t *info, const void *old, size_t size)
{
	void *chunk;

	if (NULL != old)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): allocating already allocated memory",
				file, line, __func__);
		exit(EXIT_FAILURE);
	}

	if (0 == size || size > MEM_MAX_SIZE)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): asking for a bad number of bytes (" ZBX_FS_SIZE_T
				")", file, line, __func__, (zbx_fs_size_t)size);
		exit(EXIT_FAILURE);
	}

	chunk = __mem_malloc(info, size);

	if (NULL == chunk)
	{
		if (1 == info->allow_oom)
			return NULL;

		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): out of memory (requested " ZBX_FS_SIZE_T " bytes)",
				file, line, __func__, (zbx_fs_size_t)size);
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): please increase %s configuration parameter",
				file, line, __func__, info->mem_param);
		zbx_mem_dump_stats(LOG_LEVEL_CRIT, info);
		zbx_backtrace();
		exit(EXIT_FAILURE);
	}

	/* skip past the leading size field to get the user data pointer */
	return (void *)((char *)chunk + MEM_SIZE_FIELD);
}
668
/******************************************************************************
 *                                                                            *
 * Purpose: public realloc wrapper; behaves like malloc when 'old' is NULL,   *
 *          otherwise resizes the existing allocation; terminates the process *
 *          on misuse or (unless allow_oom) on memory exhaustion              *
 *                                                                            *
 ******************************************************************************/
void *__zbx_mem_realloc(const char *file, int line, zbx_mem_info_t *info, void *old, size_t size)
{
	void *chunk;

	if (0 == size || size > MEM_MAX_SIZE)
	{
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): asking for a bad number of bytes (" ZBX_FS_SIZE_T
				")", file, line, __func__, (zbx_fs_size_t)size);
		exit(EXIT_FAILURE);
	}

	if (NULL == old)
		chunk = __mem_malloc(info, size);
	else
		chunk = __mem_realloc(info, old, size);

	if (NULL == chunk)
	{
		if (1 == info->allow_oom)
			return NULL;

		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): out of memory (requested " ZBX_FS_SIZE_T " bytes)",
				file, line, __func__, (zbx_fs_size_t)size);
		zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): please increase %s configuration parameter",
				file, line, __func__, info->mem_param);
		zbx_mem_dump_stats(LOG_LEVEL_CRIT, info);
		zbx_backtrace();
		exit(EXIT_FAILURE);
	}

	/* skip past the leading size field to get the user data pointer */
	return (void *)((char *)chunk + MEM_SIZE_FIELD);
}
701
/* public free wrapper: terminates the process when asked to free NULL */
/* (an allocator-misuse bug), otherwise delegates to __mem_free()      */
void __zbx_mem_free(const char *file, int line, zbx_mem_info_t *info, void *ptr)
{
	if (NULL != ptr)
	{
		__mem_free(info, ptr);
		return;
	}

	zabbix_log(LOG_LEVEL_CRIT, "[file:%s,line:%d] %s(): freeing a NULL pointer", file, line, __func__);
	exit(EXIT_FAILURE);
}
712
/******************************************************************************
 *                                                                            *
 * Purpose: discard all allocations and reset the segment to its initial      *
 *          state: empty buckets plus one big free chunk spanning the whole   *
 *          allocatable area (mirrors the tail of zbx_mem_create())           *
 *                                                                            *
 ******************************************************************************/
void zbx_mem_clear(zbx_mem_info_t *info)
{
	int index;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

	memset(info->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);
	index = mem_bucket_by_size(info->total_size);
	info->buckets[index] = info->lo_bound;
	mem_set_chunk_size(info->buckets[index], info->total_size);
	mem_set_prev_chunk(info->buckets[index], NULL);
	mem_set_next_chunk(info->buckets[index], NULL);
	info->used_size = 0;
	info->free_size = info->total_size;

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);
}
730
/******************************************************************************
 *                                                                            *
 * Purpose: gather allocator statistics into 'stats': per-bucket free chunk   *
 *          counts, min/max free chunk size, free/used byte totals, overhead  *
 *          and the derived used chunk count                                  *
 *                                                                            *
 ******************************************************************************/
void zbx_mem_get_stats(const zbx_mem_info_t *info, zbx_mem_stats_t *stats)
{
	void *chunk;
	int i;
	zbx_uint64_t counter;

	stats->free_chunks = 0;
	stats->max_chunk_size = __UINT64_C(0);
	stats->min_chunk_size = __UINT64_C(0xffffffffffffffff);

	/* walk every bucket's free list, counting chunks and tracking size extremes */
	for (i = 0; i < MEM_BUCKET_COUNT; i++)
	{
		counter = 0;
		chunk = info->buckets[i];

		while (NULL != chunk)
		{
			counter++;
			stats->min_chunk_size = MIN(stats->min_chunk_size, CHUNK_SIZE(chunk));
			stats->max_chunk_size = MAX(stats->max_chunk_size, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		stats->free_chunks += counter;
		stats->chunks_num[i] = counter;
	}

	/* overhead is whatever total_size does not account for as used or free:    */
	/* the 2 size fields per chunk boundary; dividing it back out gives the     */
	/* total chunk count, from which the used chunk count is derived            */
	stats->overhead = info->total_size - info->used_size - info->free_size;
	stats->used_chunks = stats->overhead / (2 * MEM_SIZE_FIELD) + 1 - stats->free_chunks;
	stats->free_size = info->free_size;
	stats->used_size = info->used_size;
}
763
/******************************************************************************
 *                                                                            *
 * Purpose: log a human-readable summary of the allocator state at the given  *
 *          log level (bucket histogram, size extremes, totals)               *
 *                                                                            *
 ******************************************************************************/
void zbx_mem_dump_stats(int level, zbx_mem_info_t *info)
{
	zbx_mem_stats_t stats;
	int i;

	zbx_mem_get_stats(info, &stats);

	zabbix_log(level, "=== memory statistics for %s ===", info->mem_descr);

	for (i = 0; i < MEM_BUCKET_COUNT; i++)
	{
		if (0 == stats.chunks_num[i])
			continue;

		/* NOTE(review): the %8u specifier assumes chunks_num[] fits an unsigned int - */
		/* verify against the zbx_mem_stats_t declaration in the header               */
		zabbix_log(level, "free chunks of size %2s %3d bytes: %8u", i == MEM_BUCKET_COUNT - 1 ? ">=" : "",
				MEM_MIN_BUCKET_SIZE + 8 * i, stats.chunks_num[i]);
	}

	zabbix_log(level, "min chunk size: %10llu bytes", (unsigned long long)stats.min_chunk_size);
	zabbix_log(level, "max chunk size: %10llu bytes", (unsigned long long)stats.max_chunk_size);

	zabbix_log(level, "memory of total size %llu bytes fragmented into %llu chunks",
			(unsigned long long)stats.free_size + stats.used_size,
			(unsigned long long)stats.free_chunks + stats.used_chunks);
	zabbix_log(level, "of those, %10llu bytes are in %8llu free chunks",
			(unsigned long long)stats.free_size, (unsigned long long)stats.free_chunks);
	zabbix_log(level, "of those, %10llu bytes are in %8llu used chunks",
			(unsigned long long)stats.used_size, (unsigned long long)stats.used_chunks);
	zabbix_log(level, "of those, %10llu bytes are used by allocation overhead",
			(unsigned long long)stats.overhead);

	zabbix_log(level, "================================");
}
797
/******************************************************************************
 *                                                                            *
 * Purpose: compute the shared memory segment size needed to guarantee that   *
 *          'chunks_num' separate allocations can be made, accounting for     *
 *          bookkeeping structures, alignment padding, the description and    *
 *          parameter strings, and per-chunk overhead                         *
 *                                                                            *
 * Return value: required segment size in bytes (upper bound)                 *
 *                                                                            *
 ******************************************************************************/
size_t zbx_mem_required_size(int chunks_num, const char *descr, const char *param)
{
	size_t size = 0;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() size:" ZBX_FS_SIZE_T " chunks_num:%d descr:'%s' param:'%s'",
			__func__, (zbx_fs_size_t)size, chunks_num, descr, param);

	/* shared memory of what size should we allocate so that there is a guarantee */
	/* that we will be able to get ourselves 'chunks_num' pieces of memory with a */
	/* total size of 'size', given that we also have to store 'descr' and 'param'? */

	size += 7;					/* ensure we allocate enough to 8-align zbx_mem_info_t */
	size += sizeof(zbx_mem_info_t);
	size += ZBX_PTR_SIZE - 1;			/* ensure we allocate enough to align bucket pointers */
	size += ZBX_PTR_SIZE * MEM_BUCKET_COUNT;
	size += strlen(descr) + 1;
	size += strlen(param) + 1;
	size += (MEM_SIZE_FIELD - 1) + 8;		/* ensure we allocate enough to align the first chunk */
	size += (MEM_SIZE_FIELD - 1) + 8;		/* ensure we allocate enough to align right size field */

	size += (chunks_num - 1) * MEM_SIZE_FIELD * 2;	/* each additional chunk requires 16 bytes of overhead */
	size += chunks_num * (MEM_MIN_ALLOC - 1);	/* each chunk has size of at least MEM_MIN_ALLOC bytes */

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s() size:" ZBX_FS_SIZE_T, __func__, (zbx_fs_size_t)size);

	return size;
}
825
/* total shared memory consumed by an allocation of 'size' bytes:   */
/* the rounded-up allocation plus the two surrounding size fields;  */
/* zero stays zero                                                  */
zbx_uint64_t zbx_mem_required_chunk_size(zbx_uint64_t size)
{
	if (0 == size)
		return 0;

	return 2 * MEM_SIZE_FIELD + mem_proper_alloc_size(size);
}
833