1 /* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23 #include "xcom/xcom_cache.h"
24
25 #include <assert.h>
26 #include <rpc/rpc.h>
27 #include <stdlib.h>
28
29 #include "xcom/app_data.h"
30 #include "xcom/bitset.h"
31 #include "xcom/node_no.h"
32 #include "xcom/pax_msg.h"
33 #include "xcom/server_struct.h"
34 #include "xcom/simset.h"
35 #include "xcom/site_def.h"
36 #include "xcom/site_struct.h"
37 #include "xcom/synode_no.h"
38 #include "xcom/task.h"
39 #include "xcom/task_debug.h"
40 #include "xcom/xcom_base.h"
41 #include "xcom/xcom_cfg.h"
42 #include "xcom/xcom_common.h"
43 #include "xcom/xcom_detector.h"
44 #include "xcom/xcom_profile.h"
45 #include "xcom/xcom_transport.h"
46 #include "xcom/xcom_vp_str.h"
47 #include "xdr_gen/xcom_vp.h"
48
/* Set to 1 to trace cache size changes in add_cache_size/sub_cache_size */
#define DBG_CACHE_SIZE 0

/* Protect at least MIN_CACHED * (number of nodes) pax_machine objects from
 * deallocation by shrink_cache */
#define MIN_CACHED 10

/* Paxos machine cache */

/* LRU list element. The pax_machine is embedded, so moving lru_link
   between the LRU chains moves the whole Paxos instance with it. */
struct lru_machine {
  linkage lru_link;
  pax_machine pax;
};

/* Synode of the most recently evicted pax_machine; used as a high-water
   mark by was_removed_from_cache */
static synode_no last_removed_cache;
63
cache_get_last_removed()64 synode_no cache_get_last_removed() { return last_removed_cache; }
65
/* Return nonzero if synode x is at or below the eviction high-water mark,
   i.e. a machine for x may already have been removed from the cache.
   Only synodes in the same group as last_removed_cache are considered. */
int was_removed_from_cache(synode_no x) {
  ADD_DBG(D_CACHE, add_event(EVENT_DUMP_PAD, string_arg("x "));
          add_synode_event(x);
          add_event(EVENT_DUMP_PAD, string_arg("last_removed_cache "));
          add_synode_event(last_removed_cache););
  /*
    What to do with requests from nodes that have a different group ID?
    Should we just ignore them, as we do with the current code,
    or should we do something about it?
  */
  return last_removed_cache.group_id == x.group_id &&
         !synode_gt(x, last_removed_cache);
}
79
/* Number of lru machines / hash buckets added per growth step */
static size_t length_increment = INCREMENT;
/* Max number of machines released per shrink_cache call (see shrink_cache) */
static size_t size_decrement = INCREMENT / 10;

/* Each hash segment has as many buckets as one growth increment */
#define BUCKETS length_increment

/* One segment of the hash stack. A segment owns BUCKETS hash chains and
   indexes machines whose msgno is above start_msgno (0 = catch-all). */
struct stack_machine {
  linkage stack_link;   /* Link in hash_stack */
  uint64_t start_msgno; /* Lowest msgno boundary; 0 accepts any msgno */
  uint occupation;      /* Number of machines hashed into this segment */
  linkage *pax_hash;    /* malloc'ed array of BUCKETS chain heads */
};

static linkage hash_stack = {0, &hash_stack,
                             &hash_stack}; /* Head of the hash stack */
static linkage protected_lru = {
    0, &protected_lru, &protected_lru}; /* Head of LRU chain of cache hits */
static linkage probation_lru = {
    0, &probation_lru, &probation_lru}; /* Head of LRU chain of cache misses */
98
/* Allocate and initialize the bucket array (BUCKETS chain heads) of a
   newly created hash segment. */
static void hash_init(stack_machine *hash_bucket) {
  linkage *chains = (linkage *)malloc(sizeof(linkage) * BUCKETS);
  size_t n;
  hash_bucket->pax_hash = chains;
  for (n = 0; n < BUCKETS; n++) {
    link_init(&chains[n], TYPE_HASH("pax_machine"));
  }
}
106
107 extern void hexdump(void *p, long length);
108
/* Map a synode to a bucket index in [0, BUCKETS). */
static unsigned int synode_hash(synode_no synode) {
  /* Hash the three fields separately, since the struct may contain
     padding bytes with undefined values */
  unsigned int const mix =
      (unsigned int)(4711 * synode.node + 5 * synode.group_id + synode.msgno);
  return mix % (unsigned int)BUCKETS;
}
116
/* Highest msgno ever hashed in; new hash segments start at this boundary */
static uint64_t highest_msgno = 0;
/* Total number of lru_machine slots currently allocated */
static uint64_t cache_length = 0;
/* Number of machines currently present in the hash */
static uint64_t occupation = 0;

static void do_increment_step();
122
/* Insert pm into the first hash segment covering its msgno, and grow the
   cache by one step when every allocated slot becomes occupied.
   Returns pm. */
static pax_machine *hash_in(pax_machine *pm) {
  synode_no synode = pm->synode;
  IFDBG(D_NONE, FN; PTREXP(pm); SYCEXP(synode););

  if (highest_msgno < synode.msgno) highest_msgno = synode.msgno;

  FWD_ITER(&hash_stack, stack_machine, {
    /* First segment whose boundary is below this msgno takes the machine;
       start_msgno == 0 marks the catch-all segment */
    if (link_iter->start_msgno < synode.msgno || link_iter->start_msgno == 0) {
      link_into(&pm->hash_link, &link_iter->pax_hash[synode_hash(synode)]);
      pm->stack_link = link_iter; /* Remember owner for hash_out accounting */
      link_iter->occupation++;
      occupation++;
      /* All slots in use: add more lru machines and a new hash segment */
      if (occupation == cache_length) {
        do_increment_step();
      }
      break;
    }
  })

  return pm;
}
144
/* Remove p from its hash chain, keeping the global and per-segment
   occupation counters in step. Safe to call on a machine that is not
   hashed in. Returns p. */
static pax_machine *hash_out(pax_machine *p) {
  IFDBG(D_NONE, FN; PTREXP(p); SYCEXP(p->synode););
  if (!link_empty(&p->hash_link)) {
    occupation--;
    p->stack_link->occupation--;
  }
  return (pax_machine *)link_out(&p->hash_link);
}
153
/* Look up the pax_machine for synode, or NULL if it is not cached.
   First finds the hash segment covering the msgno, then scans the chain
   selected by synode_hash. */
pax_machine *hash_get(synode_no synode) {
  stack_machine *hash_table = NULL;

  FWD_ITER(&hash_stack, stack_machine, {
    if (link_iter->start_msgno < synode.msgno || link_iter->start_msgno == 0) {
      hash_table = link_iter;
      break;
    }
  })

  if (hash_table != NULL) {
    linkage *bucket = &hash_table->pax_hash[synode_hash(synode)];

    FWD_ITER(bucket, pax_machine, {
      if (synode_eq(link_iter->synode, synode)) {
        return link_iter;
      }
    });
  }
  return NULL;
}
180
/* A machine is reusable if the system has not started delivering yet
   (delivered message still null) or if its synode was already executed. */
static int was_machine_executed(pax_machine *p) {
  synode_no const delivered = get_delivered_msg();
  if (synode_eq(null_synode, delivered)) return 1; /* not yet functional */
  return synode_lt(p->synode, delivered);          /* already executed */
}
186
187 /*
188 Get a machine for (re)use.
189 The machines are statically allocated, and organized in two lists.
190 probation_lru is the free list.
191 protected_lru tracks the machines that are currently in the cache in
least recently used order.
193 */
194
/* Pick a machine for (re)use. Prefer the free list (probation_lru);
   otherwise scan protected_lru for a non-busy machine whose synode was
   already executed. With force, fall back to the first non-busy machine
   even if not yet executed. May return NULL when every machine is busy. */
static lru_machine *lru_get(bool_t force) {
  lru_machine *retval = NULL;
  lru_machine *force_retval = NULL;
  if (!link_empty(&probation_lru)) {
    retval = (lru_machine *)link_first(&probation_lru);
  } else {
    /* Find the first non-busy instance in the LRU */
    FWD_ITER(
        &protected_lru, lru_machine, if (!is_busy_machine(&link_iter->pax)) {
          if (was_machine_executed(&link_iter->pax)) {
            retval = link_iter;
            break;
          } else if (force && !force_retval) {
            /* Candidate of last resort; keep looking for a better one */
            force_retval = link_iter;
          }
        })

    if (!retval && force) retval = force_retval;

    /* Since this machine is in the cache, we need to update
       last_removed_cache */
    if (retval) last_removed_cache = retval->pax.synode;
  }
  return retval;
}
220
lru_touch_hit(pax_machine * p)221 static lru_machine *lru_touch_hit(pax_machine *p) {
222 lru_machine *lru = p->lru;
223 link_into(link_out(&lru->lru_link), &protected_lru);
224 return lru;
225 }
226
/* Reset all cache bookkeeping: empty the two LRU chains and the hash
   stack, zero the accounted size and the msgno/eviction marks. Does not
   free anything — callers free first (deinit_cache) or start fresh. */
static void reset_cache() {
  link_init(&protected_lru, TYPE_HASH("lru_machine"));
  link_init(&probation_lru, TYPE_HASH("lru_machine"));
  link_init(&hash_stack, TYPE_HASH("stack_machine"));
  init_cache_size(); /* After cache has been initialized, size is 0 */
  last_removed_cache = null_synode;
  highest_msgno = 0;
}
236
237 static void add_stack_machine(uint64_t start_msgno);
238 static void expand_lru();
239
240 /*
241 Initialize the message cache.
242 The cache_manager_task is initialized in xcom_base to avoid memory
243 leaks in tests.
244 */
/* Build the initial cache: one batch of lru machines on the free list and
   a single catch-all hash segment (start_msgno 0). */
void init_cache() {
  reset_cache();
  /* Init LRU */
  expand_lru();
  /* Init first hash_table */
  add_stack_machine(0);
}
252
/* Release the dynamically allocated parts of a pax_machine before its
   lru wrapper is freed: reset the machine (dropping its messages via
   init_pax_machine) and free the lazily allocated node sets.
   NOTE(review): callers pass p == &l->pax; confirm the aliasing between
   the two parameters is intended. */
static void deinit_pax_machine(pax_machine *p, lru_machine *l) {
  init_pax_machine(&l->pax, NULL, null_synode);
  if (p->proposer.prep_nodeset) {
    free_bit_set(p->proposer.prep_nodeset);
    p->proposer.prep_nodeset = NULL;
  }
  if (p->proposer.prop_nodeset) {
    free_bit_set(p->proposer.prop_nodeset);
    p->proposer.prop_nodeset = NULL;
  }
}
264
/* Unlink, tear down and free one lru_machine, shrinking cache_length. */
static void free_lru_machine(lru_machine *link_iter) {
  link_out(&link_iter->lru_link);
  deinit_pax_machine(&link_iter->pax, link_iter);
  free(link_iter);
  cache_length--;
}
271
/* Free every lru machine and every hash segment, then reset all cache
   bookkeeping. Machines on protected_lru are hashed out first so the
   occupation counters stay consistent while freeing. */
void deinit_cache() {
  FWD_ITER(&probation_lru, lru_machine, { free_lru_machine(link_iter); })

  FWD_ITER(&protected_lru, lru_machine, {
    hash_out(&link_iter->pax);
    free_lru_machine(link_iter);
  })

  FWD_ITER(&hash_stack, stack_machine, {
    free(link_iter->pax_hash);
    free(link_iter);
  })

  reset_cache();
  psi_report_cache_shutdown();
}
288
289 /* static synode_no log_tail; */
290
/* Return the pax_machine for synode, recycling an lru machine on a cache
   miss. Does not touch the LRU position of an existing machine. Returns
   NULL when no machine can be recycled (all busy and, unless force, none
   executed yet). */
pax_machine *get_cache_no_touch(synode_no synode, bool_t force) {
  pax_machine *retval = hash_get(synode);
  IFDBG(D_NONE, FN; SYCEXP(synode); PTREXP(retval));
  if (!retval) {
    lru_machine *l =
        lru_get(force); /* Need to know when it is safe to re-use... */
    if (!l) return NULL;
    IFDBG(D_NONE, FN; PTREXP(l); COPY_AND_FREE_GOUT(dbg_pax_machine(&l->pax)););

    retval = hash_out(&l->pax);          /* Remove from hash table */
    init_pax_machine(retval, l, synode); /* Initialize */
    hash_in(retval);                     /* Insert in hash table again */
  }
  IFDBG(D_NONE, FN; SYCEXP(synode); PTREXP(retval));
  return retval;
}
309
force_get_cache(synode_no synode)310 pax_machine *force_get_cache(synode_no synode) {
311 pax_machine *retval = get_cache_no_touch(synode, TRUE);
312 lru_touch_hit(retval); /* Insert in protected_lru */
313 IFDBG(D_NONE, FN; SYCEXP(synode); PTREXP(retval));
314 return retval;
315 }
316
get_cache(synode_no synode)317 pax_machine *get_cache(synode_no synode) {
318 pax_machine *retval = get_cache_no_touch(synode, FALSE);
319 if (retval) lru_touch_hit(retval); /* Insert in protected_lru */
320 IFDBG(D_NONE, FN; SYCEXP(synode); PTREXP(retval));
321 return retval;
322 }
323
/* Decide whether the machine in link_iter may be released by shrink_cache.
   Refused while there is no site, shortly after a config was installed,
   or while delivery information from some node is missing. */
static inline int can_deallocate(lru_machine *link_iter) {
  synode_no delivered_msg;
  site_def const *site = get_site_def();
  site_def const *dealloc_site = find_site_def(link_iter->pax.synode);

  /* If we have no site, or site was just installed, refuse deallocation */
  if (site == 0) return 0;
  /*
    With the patch that was put in to ensure that nodes always see a
    global view message when it joins, the node that joins may need
    messages which are significantly behind the point where the node
    joins (effectively starting with the latest config). So there is
    a very real risk that a node which joined might find that those
    messages had been removed, since all the other nodes had executed
    past that point. This test effectively stops garbage collection
    of old messages until the joining node has got a chance to tell
    the others about its low water mark. If it has not done that
    within DETECTOR_LIVE_TIMEOUT, it will be considered dead by the
    other nodes anyway, and expelled.
  */
  if ((site->install_time + DETECTOR_LIVE_TIMEOUT) > task_now()) return 0;
  if (dealloc_site == 0) /* Synode does not match any site, OK to deallocate */
    return 1;
  delivered_msg = get_min_delivered_msg(site);
  if (synode_eq(delivered_msg,
                null_synode)) /* Missing info from some node, not OK */
    return 0;
  /* Other group ids may always go; otherwise keep MIN_CACHED msgno
     headroom below the minimum delivered message */
  return link_iter->pax.synode.group_id != delivered_msg.group_id ||
         (link_iter->pax.synode.msgno + MIN_CACHED) < delivered_msg.msgno;
}
354
/* Accounted size of dynamically allocated cache data (messages + app data) */
static uint64_t cache_size = 0;

/*
  Loop through the LRU (protected_lru) and deallocate objects until the size
  of the cache is below the limit. The freshly initialized objects are put
  into the probation_lru, so we can always start scanning at the end of
  protected_lru. lru_get will always look in probation_lru first.
  Returns the number of machines released (at most size_decrement + 1).
*/
size_t shrink_cache() {
  size_t shrunk = 0;
  FWD_ITER(&protected_lru, lru_machine, {
    if (above_cache_limit() && can_deallocate(link_iter)) {
      last_removed_cache = link_iter->pax.synode;
      hash_out(&link_iter->pax); /* Remove from hash table */
      link_into(link_out(&link_iter->lru_link),
                &probation_lru); /* Put in probation lru */
      init_pax_machine(&link_iter->pax, link_iter, null_synode);
      if (shrunk++ == size_decrement) {
        break;
      }
    } else {
      /* Stop at the first machine that cannot be released */
      break;
    }
  });
  return shrunk;
}
381
xcom_cache_var_init()382 void xcom_cache_var_init() {}
383
/* Paxos machine */

/* Initialize a Paxos instance. First subtracts the machine's old
   accounted size from the cache (sub_cache_size), then resets links,
   ballots, node sets and the three message slots. The node set bitmaps
   are allocated lazily and reused across resets. Returns p. */
pax_machine *init_pax_machine(pax_machine *p, lru_machine *lru,
                              synode_no synode) {
  sub_cache_size(p); /* p's messages no longer count toward cache_size */
  link_init(&p->hash_link, TYPE_HASH("pax_machine"));
  p->lru = lru;
  p->stack_link = NULL;
  p->synode = synode;
  p->last_modified = 0.0;
  link_init(&p->rv, TYPE_HASH("task_env"));
  init_ballot(&p->proposer.bal, -1, 0);
  init_ballot(&p->proposer.sent_prop, 0, 0);
  init_ballot(&p->proposer.sent_learn, -1, 0);
  /* Allocate node sets on first use, then just clear them */
  if (!p->proposer.prep_nodeset)
    p->proposer.prep_nodeset = new_bit_set(NSERVERS);
  BIT_ZERO(p->proposer.prep_nodeset);
  if (!p->proposer.prop_nodeset)
    p->proposer.prop_nodeset = new_bit_set(NSERVERS);
  BIT_ZERO(p->proposer.prop_nodeset);
  replace_pax_msg(&p->proposer.msg, NULL);
  init_ballot(&p->acceptor.promise, 0, 0);
  replace_pax_msg(&p->acceptor.msg, NULL);
  replace_pax_msg(&p->learner.msg, NULL);
  p->lock = 0;
  p->op = initial_op;
  p->force_delivery = 0;
  p->enforcer = 0;
  return p;
}
415
/* Take the busy flag on p. Returns the previous value of the flag, so 0
   means this call acquired the lock. */
int lock_pax_machine(pax_machine *p) {
  int const previous = p->lock;
  p->lock = 1; /* idempotent: an already-locked machine stays locked */
  return previous;
}
421
unlock_pax_machine(pax_machine * p)422 void unlock_pax_machine(pax_machine *p) { p->lock = 0; }
423
is_busy_machine(pax_machine * p)424 int is_busy_machine(pax_machine *p) { return p->lock; }
425
426 /* purecov: begin deadcode */
427 /* Debug nodesets of Paxos instance */
/* Return a freshly allocated debug string containing both proposer node
   sets; caller owns the returned string (GOUT conventions). */
char *dbg_machine_nodeset(pax_machine *p, u_int nodes) {
  GET_NEW_GOUT;
  STRLIT("proposer.prep_nodeset ");
  COPY_AND_FREE_GOUT(dbg_bitset(p->proposer.prep_nodeset, nodes));
  STRLIT("proposer.prop_nodeset ");
  COPY_AND_FREE_GOUT(dbg_bitset(p->proposer.prop_nodeset, nodes));
  RET_GOUT;
}
436
#if TASK_DBUG_ON
/* Debug a Paxos instance: dump node sets, ballots and the three message
   slots into a freshly allocated string; caller owns the string. */
char *dbg_pax_machine(pax_machine *p) {
  GET_NEW_GOUT;
  if (!p) {
    STRLIT("p == 0 ");
    RET_GOUT;
  }
  PTREXP(p);
  COPY_AND_FREE_GOUT(
      dbg_machine_nodeset(p, get_maxnodes(find_site_def(p->synode))));
  BALCEXP(p->proposer.bal);
  BALCEXP(p->proposer.sent_prop);
  BALCEXP(p->proposer.sent_learn);
  BALCEXP(p->acceptor.promise);
  STRLIT("proposer.msg ");
  COPY_AND_FREE_GOUT(dbg_pax_msg(p->proposer.msg));
  STRLIT("acceptor.msg ");
  COPY_AND_FREE_GOUT(dbg_pax_msg(p->acceptor.msg));
  STRLIT("learner.msg ");
  COPY_AND_FREE_GOUT(dbg_pax_msg(p->learner.msg));
  NDBG(p->last_modified, f);
  NDBG(p->lock, d);
  STREXP(pax_op_to_str(p->op));
  RET_GOUT;
}
/* purecov: end */
#endif
465
466 /*
467 Return the size of a pax_msg. Counts only the pax_msg struct itself
468 and the size of the app_data.
469 */
/* Size contributed by one pax_msg: the struct itself plus its app_data
   list; a null message contributes nothing. */
static inline size_t get_app_msg_size(pax_msg const *p) {
  return p ? sizeof(pax_msg) + app_data_list_size(p->a) : (size_t)0;
}
476
477 /*
478 Return the size of the messages referenced by a pax_machine.
479 The pax_machine itself is statically allocated, so we do
480 not count this when computing the cache size.
481 */
/* Sum the sizes of the messages referenced by p. The same pax_msg may be
   shared between the proposer, acceptor and learner roles; each distinct
   message is counted only once. */
size_t pax_machine_size(pax_machine const *p) {
  size_t size = get_app_msg_size(p->proposer.msg);

  if (p->acceptor.msg && p->proposer.msg != p->acceptor.msg)
    size += get_app_msg_size(p->acceptor.msg);

  if (p->learner.msg && p->acceptor.msg != p->learner.msg &&
      p->proposer.msg != p->learner.msg)
    size += get_app_msg_size(p->learner.msg);
  return size;
}
493 /* }}} */
494
495 /* The cache itself is statically allocated, set size of dynamically allocted
496 * data to 0 */
init_cache_size()497 void init_cache_size() { cache_size = 0; }
498
499 /* Add to cache size */
500
/* Add the size of p's messages to the global cache accounting and report
   the allocation to the PSI instrumentation. Returns the new total. */
uint64_t add_cache_size(pax_machine *p) {
  uint64_t x = pax_machine_size(p);
  cache_size += x;
  if (DBG_CACHE_SIZE && x) {
    G_DEBUG("%f %s:%d cache_size %lu x %lu", seconds(), __FILE__, __LINE__,
            (long unsigned int)cache_size, (long unsigned int)x);
  }
#ifndef XCOM_STANDALONE
  /* Remember whether this allocation was instrumented so the matching
     free is reported consistently in sub_cache_size */
  p->is_instrumented = psi_report_mem_alloc(x);
#endif
  return cache_size;
}
513
514 /* Subtract from cache size */
/* Subtract the size of p's messages from the global cache accounting and
   report the free to the PSI instrumentation. Returns the new total. */
uint64_t sub_cache_size(pax_machine *p) {
  uint64_t x = pax_machine_size(p);
  cache_size -= x;
  if (DBG_CACHE_SIZE && x) {
    G_DEBUG("%f %s:%d cache_size %lu x %lu", seconds(), __FILE__, __LINE__,
            (long unsigned int)cache_size, (long unsigned int)x);
  }
#ifndef XCOM_STANDALONE
  psi_report_mem_free(x, p->is_instrumented);
  p->is_instrumented = 0;
#endif
  return cache_size;
}
528
529 /* See if cache is above limit */
above_cache_limit()530 int above_cache_limit() {
531 return the_app_xcom_cfg && cache_size > the_app_xcom_cfg->m_cache_limit;
532 }
533
534 /* If cfg object exits, set max cache size */
/* If the cfg object exists, set the max cache size and shrink immediately
   when the new limit is already exceeded. Returns the new limit, or 0
   when there is no cfg object. */
uint64_t set_max_cache_size(uint64_t x) {
  uint64_t ret = 0;
  if (the_app_xcom_cfg) {
    G_DEBUG("Changing max cache size to %llu. Previous value was %llu.",
            (unsigned long long)x,
            (unsigned long long)the_app_xcom_cfg->m_cache_limit);
    ret = the_app_xcom_cfg->m_cache_limit = x;
    if (above_cache_limit()) shrink_cache();
  }
  return ret;
}
546
expand_lru()547 static void expand_lru() {
548 uint64_t i;
549 for (i = 0; i < BUCKETS; i++) {
550 lru_machine *l = (lru_machine *)calloc(1, sizeof(lru_machine));
551 link_init(&l->lru_link, TYPE_HASH("lru_machine"));
552 link_into(&l->lru_link, &probation_lru);
553 init_pax_machine(&l->pax, l, null_synode);
554 cache_length++;
555 }
556 }
557
/* Allocate a new hash segment serving msgno values above start_msgno and
   attach it to the hash stack. */
static void add_stack_machine(uint64_t start_msgno) {
  stack_machine *hash_bucket = (stack_machine *)malloc(sizeof(stack_machine));
  link_init(&hash_bucket->stack_link, TYPE_HASH("stack_machine"));
  hash_bucket->occupation = 0;
  hash_bucket->start_msgno = start_msgno;
  hash_init(hash_bucket);
  link_follow(&hash_bucket->stack_link, &hash_stack);
}
566
/* Grow the cache by one step: add BUCKETS lru machines and a new hash
   segment starting at the highest msgno seen so far. */
static void do_increment_step() {
  expand_lru();
  add_stack_machine(highest_msgno);
}
571
/* Shrink the cache by one step: free up to BUCKETS machines from the free
   list and retire the oldest (last) hash segment, which check_decrease
   has verified to be empty. */
static void do_decrement_step() {
  uint count = 0;
  FWD_ITER(&probation_lru, lru_machine, {
    free_lru_machine(link_iter);
    if (++count == BUCKETS) break;
  })

  free(((stack_machine *)link_last(&hash_stack))->pax_hash);
  /* stack_link is the first member of stack_machine, so freeing the
     unlinked linkage pointer frees the whole struct */
  free(link_out(link_last(&hash_stack)));
  /* The surviving oldest segment becomes the catch-all */
  ((stack_machine *)link_last(&hash_stack))->start_msgno = 0;
}
583
/* Use vars instead of defines for unit testing */
/* Minimum cache length required before shrinking is considered */
static uint64_t dec_threshold_length = DEC_THRESHOLD_LENGTH;
/* Occupation/length ratio below which shrinking is allowed */
static float min_target_occupation = MIN_TARGET_OCCUPATION;
/* cache_size must exceed this fraction of the limit before shrinking */
static float dec_threshold_size = DEC_THRESHOLD_SIZE;
/* The shrunk length must still keep occupation below this fraction */
static float min_length_threshold = MIN_LENGTH_THRESHOLD;
589
590 /* Shrink the cache if appropriate, else return error code */
check_decrease()591 uint16_t check_decrease() {
592 /* Do not decrease before 500k length */
593 if ((cache_length <= dec_threshold_length)) return CACHE_TOO_SMALL;
594 /* Oldest hash item is empty */
595 if (((stack_machine *)link_last(&hash_stack))->occupation != 0)
596 return CACHE_HASH_NOTEMPTY;
597 /* Low occupation */
598 if ((float)occupation >= (float)cache_length * min_target_occupation)
599 return CACHE_HIGH_OCCUPATION;
600 /* Resulting length high enough */
601 if (((float)cache_length - (float)BUCKETS) * min_length_threshold <=
602 (float)occupation)
603 return CACHE_RESULT_LOW;
604 /* Skip if cache is (likely) still increasing. */
605 if (((float)cache_size <=
606 (float)the_app_xcom_cfg->m_cache_limit * dec_threshold_size)) {
607 return CACHE_INCREASING;
608 }
609 do_decrement_step();
610 return CACHE_SHRINK_OK;
611 }
612
613 extern int xcom_shutdown;
/* One maintenance pass: evict entries while over the size limit,
   otherwise consider shrinking the cache structures themselves. */
void do_cache_maintenance() {
  if (!above_cache_limit()) {
    check_decrease();
    return;
  }
  shrink_cache();
}
621
/* Background task: run one cache maintenance pass every 0.1 s until xcom
   shuts down. Written with the xcom TASK coroutine macros, so TASK_DELAY
   yields control rather than blocking the thread. */
int cache_manager_task(task_arg arg MY_ATTRIBUTE((unused))) {
  DECL_ENV
  int dummy;
  END_ENV;

  TASK_BEGIN

  while (!xcom_shutdown) {
    do_cache_maintenance();
    TASK_DELAY(0.1);
  }
  FINALLY
  IFDBG(D_BUG, FN; STRLIT(" shutdown "));
  TASK_END;
}
637
/* Unit testing */
/* purecov: begin deadcode */
/* Accessors for cache internals, used by unit tests only */
uint64_t get_xcom_cache_occupation() { return occupation; }
uint64_t get_xcom_cache_length() { return cache_length; }
uint64_t get_xcom_cache_size() { return cache_size; }

/* Test override for the growth step (and thus BUCKETS) */
void set_length_increment(size_t increment) { length_increment = increment; }

/* Test override for the number of machines shrink_cache may release */
void set_size_decrement(size_t decrement) { size_decrement = decrement; }
647
/* Test override for the minimum length required before shrinking */
void set_dec_threshold_length(uint64_t threshold) {
  dec_threshold_length = threshold;
}

/* Test override for the occupation ratio that blocks shrinking */
void set_min_target_occupation(float threshold) {
  min_target_occupation = threshold;
}

/* Test override for the size/limit ratio required before shrinking */
void set_dec_threshold_size(float t_hold) { dec_threshold_size = t_hold; }

/* Test override for the post-shrink occupation ratio requirement */
void set_min_length_threshold(float threshold) {
  min_length_threshold = threshold;
}
/* purecov: end */
662