1 /*
2 ** Zabbix
3 ** Copyright (C) 2001-2021 Zabbix SIA
4 **
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
9 **
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
14 **
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 **/
19
20 #include "common.h"
21 #include "zbxalgo.h"
22 #include "log.h"
23 #include "zbxtrends.h"
24 #include "mutexs.h"
25 #include "memalloc.h"
26 #include "trends.h"
27
/* upper limit (bytes) of the trend function cache, defined by server configuration */
extern zbx_uint64_t	CONFIG_TREND_FUNC_CACHE_SIZE;
29
/* cached trend function result; doubles as the payload of an index hashset entry */
typedef struct
{
	zbx_uint64_t		itemid;		/* the itemid */
	int			start;		/* the period start time */
	int			end;		/* the period end time */
	zbx_trend_function_t	function;	/* the trends function */
	zbx_trend_state_t	state;		/* the cached value state */
	double			value;		/* the cached value */
	/* the slot-index links below use UINT32_MAX as the 'null' link value */
	zbx_uint32_t		prev;		/* index of the previous LRU list or unused entry */
	zbx_uint32_t		next;		/* index of the next LRU list or unused entry */
	zbx_uint32_t		prev_value;	/* index of the previous value list */
	zbx_uint32_t		next_value;	/* index of the next value list */
}
zbx_tfc_data_t;
44
/* preallocated storage for one hashset entry: 'header' reserves room for the */
/* hashset entry bookkeeping so &slot->data can be handed out by the custom   */
/* allocator as a hashset entry                                               */
typedef struct
{
	char		header[ZBX_HASHSET_ENTRY_OFFSET];	/* space for hashset entry header */
	zbx_tfc_data_t	data;					/* the cached data itself */
}
zbx_tfc_slot_t;
51
/* the trend function cache, resides in shared memory */
typedef struct
{
	zbx_hashset_t	index;		/* itemid/period/function -> cached value index */
	zbx_tfc_slot_t	*slots;		/* preallocated entry storage array */
	zbx_uint32_t	slots_num;	/* total number of slots */
	zbx_uint32_t	free_slot;	/* first never-used slot in the array */
	zbx_uint32_t	free_head;	/* head of freed slot list (UINT32_MAX if empty) */
	zbx_uint32_t	lru_head;	/* least recently used entry (eviction candidate) */
	zbx_uint32_t	lru_tail;	/* most recently used entry */
	zbx_uint64_t	hits;		/* cache hit counter */
	zbx_uint64_t	misses;		/* cache miss counter */
	zbx_uint64_t	items_num;	/* number of cached items (per-item root entries) */
}
zbx_tfc_t;
66
static zbx_tfc_t	*cache = NULL;

/*
 * The shared memory is split in three parts:
 * 1) header, containing cache information
 * 2) indexing hashset slots pointer array, allocated during cache initialization
 * 3) slots array, allocated during cache initialization and used for hashset entry allocations
 */
static zbx_mem_info_t	*tfc_mem = NULL;

static zbx_mutex_t	tfc_lock = ZBX_MUTEX_NULL;

/* generates the __tfc_mem_malloc_func()/__tfc_mem_realloc_func()/__tfc_mem_free_func() */
/* wrappers over the tfc_mem shared memory allocator                                    */
ZBX_MEM_FUNC_IMPL(__tfc, tfc_mem)

#define LOCK_CACHE	zbx_mutex_lock(tfc_lock)
#define UNLOCK_CACHE	zbx_mutex_unlock(tfc_lock)
83
84 static void tfc_free_slot(zbx_tfc_slot_t *slot)
85 {
86 zbx_uint32_t index = slot - cache->slots;
87
88 slot->data.next = cache->free_head;
89 slot->data.prev = UINT32_MAX;
90 cache->free_head = index;
91 }
92
tfc_alloc_slot(void)93 static zbx_tfc_slot_t *tfc_alloc_slot(void)
94 {
95 zbx_uint32_t index;
96
97 if (cache->free_slot != cache->slots_num)
98 tfc_free_slot(&cache->slots[cache->free_slot++]);
99
100 if (UINT32_MAX == cache->free_head)
101 {
102 THIS_SHOULD_NEVER_HAPPEN;
103 exit(EXIT_FAILURE);
104 }
105
106 index = cache->free_head;
107 cache->free_head = cache->slots[index].data.next;
108
109 return &cache->slots[index];
110 }
111
tfc_data_slot_index(zbx_tfc_data_t * data)112 static zbx_uint32_t tfc_data_slot_index(zbx_tfc_data_t *data)
113 {
114 return (zbx_tfc_slot_t *)((char *)data - ZBX_HASHSET_ENTRY_OFFSET) - cache->slots;
115 }
116
tfc_hash_func(const void * v)117 static zbx_hash_t tfc_hash_func(const void *v)
118 {
119 const zbx_tfc_data_t *d = (const zbx_tfc_data_t *)v;
120 zbx_hash_t hash;
121
122 hash = ZBX_DEFAULT_UINT64_HASH_FUNC(&d->itemid);
123 hash = ZBX_DEFAULT_UINT64_HASH_ALGO(&d->start, sizeof(d->start), hash);
124 hash = ZBX_DEFAULT_UINT64_HASH_ALGO(&d->end, sizeof(d->end), hash);
125
126 return ZBX_DEFAULT_UINT64_HASH_ALGO(&d->function, sizeof(d->function), hash);
127 }
128
tfc_compare_func(const void * v1,const void * v2)129 static int tfc_compare_func(const void *v1, const void *v2)
130 {
131 const zbx_tfc_data_t *d1 = (const zbx_tfc_data_t *)v1;
132 const zbx_tfc_data_t *d2 = (const zbx_tfc_data_t *)v2;
133
134 ZBX_RETURN_IF_NOT_EQUAL(d1->itemid, d2->itemid);
135 ZBX_RETURN_IF_NOT_EQUAL(d1->start, d2->start);
136 ZBX_RETURN_IF_NOT_EQUAL(d1->end, d2->end);
137
138 return d1->function - d2->function;
139 }
140
/******************************************************************************
 *                                                                            *
 * Function: tfc_malloc_func                                                  *
 *                                                                            *
 * Purpose: allocate memory for indexing hashset                              *
 *                                                                            *
 * Comments: There are two kinds of allocations that should be done:          *
 *           1) initial allocation of hashset slots array                     *
 *           2) allocations of hashset entries                                *
 *           The initial hashset size is chosen large enough to hold all      *
 *           entries without reallocation. So there should be no other        *
 *           allocations done.                                                *
 *                                                                            *
 ******************************************************************************/
static void	*tfc_malloc_func(void *old, size_t size)
{
	/* counts non-entry allocations; only the very first one (the hashset */
	/* slots pointer array) is served from the shared memory allocator    */
	static int alloc_num = 0;

	/* an entry-sized request - serve it from the preallocated slot array */
	if (sizeof(zbx_tfc_slot_t) == size)
		return tfc_alloc_slot();

	if (0 == alloc_num++)
		return __tfc_mem_malloc_func(old, size);

	/* any further request (hashset growth) is refused so the cache */
	/* stays within its fixed memory budget                         */
	return NULL;
}
167
/* reallocation is never expected for the index hashset - always report failure */
static void	*tfc_realloc_func(void *old, size_t size)
{
	ZBX_UNUSED(size);
	ZBX_UNUSED(old);

	return NULL;
}
175
tfc_free_func(void * ptr)176 static void tfc_free_func(void *ptr)
177 {
178 if (ptr >= (void *)cache->slots && ptr < (void *)(cache->slots + cache->slots_num))
179 return tfc_free_slot(ptr);
180
181 return __tfc_mem_free_func(ptr);
182 }
183
184 /******************************************************************************
185 * *
186 * Function: tfc_lru_append *
187 * *
188 * Purpose: append data to the tail of least recently used slot list *
189 * *
190 ******************************************************************************/
tfc_lru_append(zbx_tfc_data_t * data)191 static void tfc_lru_append(zbx_tfc_data_t *data)
192 {
193 zbx_uint32_t index;
194
195 index = tfc_data_slot_index(data);
196
197 data->prev = cache->lru_tail;
198 data->next = UINT32_MAX;
199
200 if (UINT32_MAX != data->prev)
201 cache->slots[data->prev].data.next = index;
202 else
203 cache->lru_head = index;
204
205 cache->lru_tail = index;
206 }
207
208 /******************************************************************************
209 * *
210 * Function: tfc_lru_remove *
211 * *
212 * Purpose: remove data from least recently used slot list *
213 * *
214 ******************************************************************************/
tfc_lru_remove(zbx_tfc_data_t * data)215 static void tfc_lru_remove(zbx_tfc_data_t *data)
216 {
217 if (UINT32_MAX != data->prev)
218 cache->slots[data->prev].data.next = data->next;
219 else
220 cache->lru_head = data->next;
221
222 if (UINT32_MAX != data->next)
223 cache->slots[data->next].data.prev = data->prev;
224 else
225 cache->lru_tail = data->prev;
226 }
227
/******************************************************************************
 *                                                                            *
 * Function: tfc_value_append                                                 *
 *                                                                            *
 * Purpose: append data to the tail of same item value list                   *
 *                                                                            *
 * Comments: The per-item value list is circular and doubly linked through    *
 *           the prev_value/next_value slot indexes, with 'root' acting as    *
 *           the sentinel node (root->prev_value is the list tail).           *
 *                                                                            *
 ******************************************************************************/
static void	tfc_value_append(zbx_tfc_data_t *root, zbx_tfc_data_t *data)
{
	zbx_uint32_t	index, root_index;

	/* data already is the list tail - nothing to do */
	if (root->prev_value == (index = tfc_data_slot_index(data)))
		return;

	root_index = tfc_data_slot_index(root);

	/* link data between the current tail and the root sentinel */
	data->next_value = root_index;
	data->prev_value = root->prev_value;

	root->prev_value = index;
	cache->slots[data->prev_value].data.next_value = index;
}
250
251 /******************************************************************************
252 * *
253 * Function: tfc_value_remove *
254 * *
255 * Purpose: remove data from same item value list *
256 * *
257 ******************************************************************************/
tfc_value_remove(zbx_tfc_data_t * data)258 static void tfc_value_remove(zbx_tfc_data_t *data)
259 {
260 cache->slots[data->prev_value].data.next_value = data->next_value;
261 cache->slots[data->next_value].data.prev_value = data->prev_value;
262 }
263
/******************************************************************************
 *                                                                            *
 * Function: tfc_free_data                                                    *
 *                                                                            *
 * Purpose: frees slot used to store trends function data                     *
 *                                                                            *
 * Comments: The entry is unlinked from the LRU and per-item value lists      *
 *           before being removed from the index hashset, which returns the   *
 *           slot to the free list through tfc_free_func().                   *
 *                                                                            *
 ******************************************************************************/
static void	tfc_free_data(zbx_tfc_data_t *data)
{
	tfc_lru_remove(data);
	tfc_value_remove(data);

	/* in the circular value list prev_value == next_value means only the   */
	/* root sentinel would remain - remove it together with this entry and  */
	/* decrease the cached item count                                       */
	if (data->prev_value == data->next_value)
	{
		zbx_hashset_remove_direct(&cache->index, &cache->slots[data->prev_value].data);
		cache->items_num--;
	}

	zbx_hashset_remove_direct(&cache->index, data);
}
284
285 /******************************************************************************
286 * *
287 * Function: tfc_reserve_slot *
288 * *
289 * Purpose: ensure there is a free slot available *
290 * *
291 ******************************************************************************/
tfc_reserve_slot(void)292 static void tfc_reserve_slot(void)
293 {
294 if (UINT32_MAX == cache->free_head && cache->slots_num == cache->free_slot)
295 {
296 if (UINT32_MAX == cache->lru_head)
297 {
298 THIS_SHOULD_NEVER_HAPPEN;
299 exit(1);
300 }
301
302 tfc_free_data(&cache->slots[cache->lru_head].data);
303 }
304 }
305
/******************************************************************************
 *                                                                            *
 * Function: tfc_index_add                                                    *
 *                                                                            *
 * Purpose: indexes data by adding it to the index hashset                    *
 *                                                                            *
 * Comments: The insert can fail only when the hashset requests memory that   *
 *           tfc_malloc_func() refuses.  In that case the slot budget is      *
 *           clamped to the actual entry count, an entry is evicted and the   *
 *           insert is retried.                                               *
 *                                                                            *
 ******************************************************************************/
static zbx_tfc_data_t	*tfc_index_add(zbx_tfc_data_t *data_local)
{
	zbx_tfc_data_t	*data;

	if (NULL == (data = (zbx_tfc_data_t *)zbx_hashset_insert(&cache->index, data_local, sizeof(zbx_tfc_data_t))))
	{
		/* shrink the slot budget to what actually fits and release a slot by eviction */
		cache->slots_num = cache->index.num_data;
		tfc_reserve_slot();

		if (NULL == (data = (zbx_tfc_data_t *)zbx_hashset_insert(&cache->index, data_local,
				sizeof(zbx_tfc_data_t))))
		{
			/* the retry cannot fail - a slot was just released above */
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}
	}

	return data;
}
332
/******************************************************************************
 *                                                                            *
 * Function: zbx_tfc_init                                                     *
 *                                                                            *
 * Purpose: initialize trend function cache                                   *
 *                                                                            *
 * Parameters: error - [OUT] the error message                                *
 *                                                                            *
 * Return value: SUCCEED - the cache was initialized successfully             *
 *               FAIL - otherwise                                             *
 *                                                                            *
 * Comments: With TrendFunctionCacheSize set to 0 the cache stays disabled    *
 *           (the 'cache' pointer remains NULL) and the zbx_tfc_*() API       *
 *           degrades to no-ops.                                              *
 *                                                                            *
 ******************************************************************************/
int	zbx_tfc_init(char **error)
{
	zbx_uint64_t	size_reserved;
	int		ret = FAIL;

	if (0 == CONFIG_TREND_FUNC_CACHE_SIZE)
	{
		zabbix_log(LOG_LEVEL_DEBUG, "%s(): trends function cache disabled", __func__);
		return SUCCEED;
	}

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

	if (SUCCEED != zbx_mutex_create(&tfc_lock, ZBX_MUTEX_TREND_FUNC, error))
		goto out;

	/* the shared memory allocator's own bookkeeping overhead for this segment */
	size_reserved = zbx_mem_required_size(1, "trend function cache size", "TrendFunctionCacheSize");

	if (SUCCEED != zbx_mem_create(&tfc_mem, CONFIG_TREND_FUNC_CACHE_SIZE, "trend function cache size",
			"TrendFunctionCacheSize", 1, error))
	{
		goto out;
	}

	cache = (zbx_tfc_t *)__tfc_mem_realloc_func(NULL, sizeof(zbx_tfc_t));

	/* (8 + 8) * 3 - overhead for 3 allocations */
	CONFIG_TREND_FUNC_CACHE_SIZE -= size_reserved + sizeof(zbx_tfc_t) + (8 + 8) * 3;

	/* 5/4 - reversing critical load factor which is accounted for when inserting new hashset entry */
	/* but ignored when creating hashset with the specified size */
	cache->slots_num = CONFIG_TREND_FUNC_CACHE_SIZE / (16 * 5 / 4 + sizeof(zbx_tfc_slot_t));

	zabbix_log(LOG_LEVEL_DEBUG, "%s(): slots:%u", __func__, cache->slots_num);

	/* sized so the hashset should never need to grow - see tfc_malloc_func() comments */
	zbx_hashset_create_ext(&cache->index, cache->slots_num, tfc_hash_func, tfc_compare_func,
			NULL, tfc_malloc_func, tfc_realloc_func, tfc_free_func);

	cache->lru_head = UINT32_MAX;
	cache->lru_tail = UINT32_MAX;

	cache->slots = (zbx_tfc_slot_t *)__tfc_mem_malloc_func(NULL, sizeof(zbx_tfc_slot_t) * cache->slots_num);
	cache->free_head = UINT32_MAX;
	cache->free_slot = 0;

	cache->hits = 0;
	cache->misses = 0;
	cache->items_num = 0;

	ret = SUCCEED;
out:
	/* NOTE(review): on the success path *error is never assigned by this function, */
	/* so this log line relies on the caller passing a NULL-initialized pointer -   */
	/* verify against callers                                                       */
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s(): %s", __func__, ZBX_NULL2EMPTY_STR(*error));

	return ret;
}
400
/******************************************************************************
 *                                                                            *
 * Function: zbx_tfc_get_value                                                *
 *                                                                            *
 * Purpose: get value and state from trend function cache                     *
 *                                                                            *
 * Parameters: itemid   - [IN] the itemid                                     *
 *             start    - [IN] the period start time (including)              *
 *             end      - [IN] the period end time (including)                *
 *             function - [IN] the trend function                             *
 *             value    - [OUT] the cached value                              *
 *             state    - [OUT] the cached state                              *
 *                                                                            *
 * Return value: SUCCEED - the value/state was retrieved successfully         *
 *               FAIL - no cached item value of the function over the range   *
 *                                                                            *
 * Comments: A hit also refreshes the entry's LRU position.  When the cache   *
 *           is disabled the function always reports a miss.                  *
 *                                                                            *
 ******************************************************************************/
int	zbx_tfc_get_value(zbx_uint64_t itemid, int start, int end, zbx_trend_function_t function, double *value,
		zbx_trend_state_t *state)
{
	zbx_tfc_data_t	*data, data_local;

	if (NULL == cache)
		return FAIL;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() itemid:" ZBX_FS_UI64 " period:%d-%d", __func__, itemid, start, end);

	/* full lookup key - itemid, period and trend function */
	data_local.itemid = itemid;
	data_local.start = start;
	data_local.end = end;
	data_local.function = function;

	LOCK_CACHE;

	if (NULL != (data = (zbx_tfc_data_t *)zbx_hashset_search(&cache->index, &data_local)))
	{
		/* move the entry to the most recently used end of the LRU list */
		tfc_lru_remove(data);
		tfc_lru_append(data);

		*value = data->value;
		*state = data->state;

		cache->hits++;
	}
	else
		cache->misses++;

	UNLOCK_CACHE;

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s() data:%p", __func__, data);

	return NULL != data ? SUCCEED : FAIL;
}
454
/******************************************************************************
 *                                                                            *
 * Function: zbx_tfc_put_value                                                *
 *                                                                            *
 * Purpose: put value and state from trend function cache                     *
 *                                                                            *
 * Parameters: itemid   - [IN] the itemid                                     *
 *             start    - [IN] the period start time (including)              *
 *             end      - [IN] the period end time (including)                *
 *             function - [IN] the trend function                             *
 *             value    - [IN] the value to cache                             *
 *             state    - [IN] the state to cache                             *
 *                                                                            *
 * Comments: Each item has a root (sentinel) entry, keyed by itemid with a    *
 *           zeroed period and ZBX_TREND_FUNCTION_UNKNOWN function, which     *
 *           anchors the circular list of that item's cached values.          *
 *                                                                            *
 ******************************************************************************/
void	zbx_tfc_put_value(zbx_uint64_t itemid, int start, int end, zbx_trend_function_t function, double value,
		zbx_trend_state_t state)
{
	zbx_tfc_data_t	*data, data_local, *root;

	if (NULL == cache)
		return;

	/* search key of the per-item root entry */
	data_local.itemid = itemid;
	data_local.start = 0;
	data_local.end = 0;
	data_local.function = ZBX_TREND_FUNCTION_UNKNOWN;

	LOCK_CACHE;

	/* make sure the inserts below cannot fail for lack of slots */
	tfc_reserve_slot();

	if (NULL == (root = (zbx_tfc_data_t *)zbx_hashset_search(&cache->index, &data_local)))
	{
		root = tfc_index_add(&data_local);
		/* a single-node circular value list: the root links to itself */
		root->prev_value = tfc_data_slot_index(root);
		root->next_value = root->prev_value;
		cache->items_num++;
		tfc_reserve_slot();
	}

	data_local.start = start;
	data_local.end = end;
	data_local.function = function;
	data_local.state = ZBX_TREND_STATE_UNKNOWN;
	data = tfc_index_add(&data_local);

	/* ZBX_TREND_STATE_UNKNOWN identifies a freshly inserted entry, since      */
	/* existing entries carry the state assigned below; assumes callers never  */
	/* pass ZBX_TREND_STATE_UNKNOWN as the state to cache - TODO confirm       */
	if (ZBX_TREND_STATE_UNKNOWN == data->state)
	{
		/* new slot was allocated, link it */
		tfc_lru_append(data);
		tfc_value_append(root, data);
	}

	data->value = value;
	data->state = state;

	UNLOCK_CACHE;
}
513
/******************************************************************************
 *                                                                            *
 * Function: zbx_tfc_invalidate_trends                                        *
 *                                                                            *
 * Purpose: remove cached values whose period covers the clock of an          *
 *          incoming trend record, as the cached result would become stale    *
 *                                                                            *
 * Parameters: trends     - [IN] the incoming trend records                   *
 *             trends_num - [IN] the number of trend records                  *
 *                                                                            *
 ******************************************************************************/
void	zbx_tfc_invalidate_trends(ZBX_DC_TREND *trends, int trends_num)
{
	zbx_tfc_data_t	*root, *data, data_local;
	int		i, next;

	if (NULL == cache)
		return;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() trends_num:%d", __func__, trends_num);

	/* search key of the per-item root entry (see zbx_tfc_put_value()) */
	data_local.start = 0;
	data_local.end = 0;
	data_local.function = ZBX_TREND_FUNCTION_UNKNOWN;

	LOCK_CACHE;

	for (i = 0; i < trends_num; i++)
	{
		data_local.itemid = trends[i].itemid;

		if (NULL == (root = (zbx_tfc_data_t *)zbx_hashset_search(&cache->index, &data_local)))
			continue;

		/* walk the circular value list; 'next' is captured first because */
		/* tfc_free_data() unlinks and frees the current entry            */
		for (data = &cache->slots[root->next_value].data; data != root; data = &cache->slots[next].data)
		{
			next = data->next_value;

			/* keep entries whose period does not include the trend record clock */
			if (trends[i].clock < data->start || trends[i].clock > data->end)
				continue;

			tfc_free_data(data);
		}
	}

	UNLOCK_CACHE;

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);
}
552
zbx_tfc_get_stats(zbx_tfc_stats_t * stats,char ** error)553 int zbx_tfc_get_stats(zbx_tfc_stats_t *stats, char **error)
554 {
555 if (NULL == cache)
556 {
557 if (NULL != error)
558 *error = zbx_strdup(*error, "Trends function cache is disabled.");
559
560 return FAIL;
561 }
562
563 LOCK_CACHE;
564
565 stats->hits = cache->hits;
566 stats->misses = cache->misses;
567 stats->items_num = cache->items_num;
568 stats->requests_num = cache->index.num_data - cache->items_num;
569
570 UNLOCK_CACHE;
571
572 return SUCCEED;
573 }
574