/** @file

  Regression tests for the cache: a CacheTestSM state machine driving
  write/lookup/read/remove cycles, a stripe assignment stability check, and
  RAM cache (LRU and CLFUS) hit rate tests.

  @section license License

  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements. See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership. The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.

*/

#include "P_Cache.h"
#include "P_CacheTest.h"
#include <vector>
#include <cmath>
#include <cstdlib>
#include <cstring>

CacheTestSM::CacheTestSM(RegressionTest *t, const char *name) : RegressionSM(t), cache_test_name(name)
{
  SET_HANDLER(&CacheTestSM::event_handler);
}

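// Clone support: the members between the start_memcpy_on_clone and
// end_memcpy_on_clone markers (presumably declared as fields bracketing the
// cloneable state in P_CacheTest.h) are copied as one raw block, so this
// constructor stays correct as fields are added to that region; everything
// outside it is set up explicitly.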
CacheTestSM::CacheTestSM(const CacheTestSM &ao) : RegressionSM(ao)
{
  int o = static_cast<int>((reinterpret_cast<char *>(&start_memcpy_on_clone)) - (reinterpret_cast<char *>(this)));
  int s = static_cast<int>((reinterpret_cast<char *>(&end_memcpy_on_clone)) - (reinterpret_cast<char *>(&start_memcpy_on_clone)));
  memcpy((reinterpret_cast<char *>(this)) + o, (reinterpret_cast<const char *>(&ao)) + o, s);
  SET_HANDLER(&CacheTestSM::event_handler);
}

CacheTestSM::~CacheTestSM()
{
  ink_assert(!cache_action);
  ink_assert(!cache_vc);
  if (buffer_reader) {
    buffer->dealloc_reader(buffer_reader);
  }
  if (buffer) {
    free_MIOBuffer(buffer);
  }
}

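// The callouts issue the actual VIO once a cache VConnection is available.
// The CACHE_SM-generated test classes below override them, e.g. to check or
// set headers before starting I/O; a negative return makes the event handler
// abort through Lclose_error_next.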
int
CacheTestSM::open_read_callout()
{
  cvio = cache_vc->do_io_read(this, nbytes, buffer);
  return 1;
}

int
CacheTestSM::open_write_callout()
{
  cvio = cache_vc->do_io_write(this, nbytes, buffer_reader);
  return 1;
}

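// Central dispatcher for every test iteration: cache events start I/O via the
// callouts, VC events drive fill_buffer()/check_buffer(), and the Lcancel_next,
// Lclose_next, and Lclose_error_next labels funnel every outcome through
// common cleanup before check_result() decides whether to repeat or complete.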
int
CacheTestSM::event_handler(int event, void *data)
{
  switch (event) {
  case EVENT_INTERVAL:
  case EVENT_IMMEDIATE:
    cancel_timeout();
    if (cache_action) {
      cache_action->cancel();
      cache_action = nullptr;
    }
    if (cache_vc) {
      cache_vc->do_io_close();
      cache_vc = nullptr;
    }
    cvio = nullptr;
    make_request();
    return EVENT_DONE;

  case CACHE_EVENT_LOOKUP_FAILED:
  case CACHE_EVENT_LOOKUP:
    goto Lcancel_next;

  case CACHE_EVENT_OPEN_READ:
    initial_event = event;
    cancel_timeout();
    cache_action = nullptr;
    cache_vc = static_cast<CacheVConnection *>(data);
    buffer = new_empty_MIOBuffer(BUFFER_SIZE_INDEX_32K);
    buffer_reader = buffer->alloc_reader();
    if (open_read_callout() < 0) {
      goto Lclose_error_next;
    } else {
      return EVENT_DONE;
    }

  case CACHE_EVENT_OPEN_READ_FAILED:
    goto Lcancel_next;

  case VC_EVENT_READ_READY:
    if (!check_buffer()) {
      goto Lclose_error_next;
    }
    buffer_reader->consume(buffer_reader->read_avail());
    (static_cast<VIO *>(data))->reenable();
    return EVENT_CONT;

  case VC_EVENT_READ_COMPLETE:
    if (!check_buffer()) {
      goto Lclose_error_next;
    }
    goto Lclose_next;

  case VC_EVENT_ERROR:
  case VC_EVENT_EOS:
    goto Lclose_error_next;

  case CACHE_EVENT_OPEN_WRITE:
    initial_event = event;
    cancel_timeout();
    cache_action = nullptr;
    cache_vc = static_cast<CacheVConnection *>(data);
    buffer = new_empty_MIOBuffer(BUFFER_SIZE_INDEX_32K);
    buffer_reader = buffer->alloc_reader();
    if (open_write_callout() < 0) {
      goto Lclose_error_next;
    } else {
      return EVENT_DONE;
    }

  case CACHE_EVENT_OPEN_WRITE_FAILED:
    goto Lcancel_next;

  case VC_EVENT_WRITE_READY:
    fill_buffer();
    cvio->reenable();
    return EVENT_CONT;

  case VC_EVENT_WRITE_COMPLETE:
    if (nbytes != cvio->ndone) {
      goto Lclose_error_next;
    }
    goto Lclose_next;

  case CACHE_EVENT_REMOVE:
  case CACHE_EVENT_REMOVE_FAILED:
    goto Lcancel_next;

  case CACHE_EVENT_SCAN:
    initial_event = event;
    cache_vc = static_cast<CacheVConnection *>(data);
    return EVENT_CONT;

  case CACHE_EVENT_SCAN_OBJECT:
    return CACHE_SCAN_RESULT_CONTINUE;

  case CACHE_EVENT_SCAN_OPERATION_FAILED:
    return CACHE_SCAN_RESULT_CONTINUE;

  case CACHE_EVENT_SCAN_OPERATION_BLOCKED:
    return CACHE_SCAN_RESULT_CONTINUE;

  case CACHE_EVENT_SCAN_DONE:
    return EVENT_CONT;

  case CACHE_EVENT_SCAN_FAILED:
    return EVENT_CONT;

  case AIO_EVENT_DONE:
    goto Lnext;

  default:
    ink_assert(!"case");
    break;
  }
  return EVENT_DONE;

Lcancel_next:
  cancel_timeout();
  cache_action = nullptr;
  goto Lnext;
Lclose_error_next:
  cache_vc->do_io_close(1);
  goto Lclose_next_internal;
Lclose_next:
  cache_vc->do_io_close();
Lclose_next_internal:
  cache_vc = nullptr;
  if (buffer_reader) {
    buffer->dealloc_reader(buffer_reader);
    buffer_reader = nullptr;
  }
  if (buffer) {
    free_MIOBuffer(buffer);
    buffer = nullptr;
  }
Lnext:
  if (check_result(event) && repeat_count) {
    repeat_count--;
    timeout = eventProcessor.schedule_imm(this);
    return EVENT_DONE;
  } else {
    return complete(event);
  }
}

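// fill_buffer() and check_buffer() generate and verify a deterministic byte
// pattern: the cache key, salted via content_salt and with b[0] set to the
// current block index, repeated for the length of the stream. Since the
// pattern is a pure function of the stream offset, the reader can verify any
// position without sharing state with the writer.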
void
CacheTestSM::fill_buffer()
{
  int64_t avail = buffer->write_avail();
  CacheKey k = key;
  k.b[1] += content_salt;
  int64_t sk = static_cast<int64_t>(sizeof(key));
  while (avail > 0) {
    int64_t l = avail;
    if (l > sk) {
      l = sk;
    }

    int64_t pos = cvio->ndone + buffer_reader->read_avail();
    int64_t o = pos % sk;

    if (l > sk - o) {
      l = sk - o;
    }
    k.b[0] = pos / sk;
    char *x = (reinterpret_cast<char *>(&k)) + o;
    // MIOBuffer::write() both copies and advances the fill position, so no
    // separate fill() call is needed (a second fill() would append garbage).
    buffer->write(x, l);
    avail -= l;
  }
}

int
CacheTestSM::check_buffer()
{
  int64_t avail = buffer_reader->read_avail();
  CacheKey k = key;
  k.b[1] += content_salt;
  char b[sizeof(key)];
  int64_t sk = static_cast<int64_t>(sizeof(key));
  int64_t pos = cvio->ndone - buffer_reader->read_avail();
  while (avail > 0) {
    int64_t l = avail;
    if (l > sk) {
      l = sk;
    }
    int64_t o = pos % sk;
    if (l > sk - o) {
      l = sk - o;
    }
    k.b[0] = pos / sk;
    char *x = (reinterpret_cast<char *>(&k)) + o;
    // Copy without consuming; IOBufferReader::read() consumes as it copies,
    // so pairing it with the explicit consume() below would skip data.
    buffer_reader->memcpy(&b[0], l);
    if (::memcmp(b, x, l)) {
      return 0;
    }
    buffer_reader->consume(l);
    pos += l;
    avail -= l;
  }
  return 1;
}

int
CacheTestSM::check_result(int event)
{
  return initial_event == expect_initial_event && event == expect_event;
}

int
CacheTestSM::complete(int event)
{
  if (!check_result(event)) {
    done(REGRESSION_TEST_FAILED);
  } else {
    done(REGRESSION_TEST_PASSED);
  }
  delete this;
  return EVENT_DONE;
}

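// CACHE_SM (see P_CacheTest.h) declares a CacheTestSM subclass and an instance
// of it on the stack. Judging by the call sites, the braced block becomes the
// body of the request-issuing method and any trailing tokens are spliced into
// the class body, which is how the replace and pread tests below override
// open_read_callout() / open_write_callout().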
EXCLUSIVE_REGRESSION_TEST(cache)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
{
  if (cacheProcessor.IsCacheEnabled() != CACHE_INITIALIZED) {
    rprintf(t, "cache not initialized");
    *pstatus = REGRESSION_TEST_FAILED;
    return;
  }

  EThread *thread = this_ethread();

  CACHE_SM(t, write_test, { cacheProcessor.open_write(this, &key, CACHE_FRAG_TYPE_NONE, 100, CACHE_WRITE_OPT_SYNC); });
  write_test.expect_initial_event = CACHE_EVENT_OPEN_WRITE;
  write_test.expect_event = VC_EVENT_WRITE_COMPLETE;
  write_test.nbytes = 100;
  rand_CacheKey(&write_test.key, thread->mutex);

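  // The lookup/read/remove tests reuse write_test.key, so each operates on the
  // object written above; after remove_test runs, the *_fail tests confirm the
  // object is gone. remove_fail_test uses a fresh random key instead.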
  CACHE_SM(t, lookup_test, { cacheProcessor.lookup(this, &key); });
  lookup_test.expect_event = CACHE_EVENT_LOOKUP;
  lookup_test.key = write_test.key;

  CACHE_SM(t, read_test, { cacheProcessor.open_read(this, &key); });
  read_test.expect_initial_event = CACHE_EVENT_OPEN_READ;
  read_test.expect_event = VC_EVENT_READ_COMPLETE;
  read_test.nbytes = 100;
  read_test.key = write_test.key;

  CACHE_SM(t, remove_test, { cacheProcessor.remove(this, &key); });
  remove_test.expect_event = CACHE_EVENT_REMOVE;
  remove_test.key = write_test.key;

  CACHE_SM(t, lookup_fail_test, { cacheProcessor.lookup(this, &key); });
  lookup_fail_test.expect_event = CACHE_EVENT_LOOKUP_FAILED;
  lookup_fail_test.key = write_test.key;

  CACHE_SM(t, read_fail_test, { cacheProcessor.open_read(this, &key); });
  read_fail_test.expect_event = CACHE_EVENT_OPEN_READ_FAILED;
  read_fail_test.key = write_test.key;

  CACHE_SM(t, remove_fail_test, { cacheProcessor.remove(this, &key); });
  remove_fail_test.expect_event = CACHE_EVENT_REMOVE_FAILED;
  rand_CacheKey(&remove_fail_test.key, thread->mutex);

  CACHE_SM(
    t, replace_write_test,
    { cacheProcessor.open_write(this, &key, CACHE_FRAG_TYPE_NONE, 100, CACHE_WRITE_OPT_SYNC); } int open_write_callout() {
      header.serial = 10;
      cache_vc->set_header(&header, sizeof(header));
      cvio = cache_vc->do_io_write(this, nbytes, buffer_reader);
      return 1;
    });
  replace_write_test.expect_initial_event = CACHE_EVENT_OPEN_WRITE;
  replace_write_test.expect_event = VC_EVENT_WRITE_COMPLETE;
  replace_write_test.nbytes = 100;
  rand_CacheKey(&replace_write_test.key, thread->mutex);

  CACHE_SM(
    t, replace_test,
    { cacheProcessor.open_write(this, &key, CACHE_FRAG_TYPE_NONE, 100, CACHE_WRITE_OPT_OVERWRITE_SYNC); } int open_write_callout() {
      CacheTestHeader *h = nullptr;
      int hlen = 0;
      if (cache_vc->get_header((void **)&h, &hlen) < 0) {
        return -1;
      }
      if (h->serial != 10) {
        return -1;
      }
      header.serial = 11;
      cache_vc->set_header(&header, sizeof(header));
      cvio = cache_vc->do_io_write(this, nbytes, buffer_reader);
      return 1;
    });
  replace_test.expect_initial_event = CACHE_EVENT_OPEN_WRITE;
  replace_test.expect_event = VC_EVENT_WRITE_COMPLETE;
  replace_test.nbytes = 100;
  replace_test.key = replace_write_test.key;
  replace_test.content_salt = 1;

  CACHE_SM(
    t, replace_read_test, { cacheProcessor.open_read(this, &key); } int open_read_callout() {
      CacheTestHeader *h = nullptr;
      int hlen = 0;
      if (cache_vc->get_header((void **)&h, &hlen) < 0) {
        return -1;
      }
      if (h->serial != 11) {
        return -1;
      }
      cvio = cache_vc->do_io_read(this, nbytes, buffer);
      return 1;
    });
  replace_read_test.expect_initial_event = CACHE_EVENT_OPEN_READ;
  replace_read_test.expect_event = VC_EVENT_READ_COMPLETE;
  replace_read_test.nbytes = 100;
  replace_read_test.key = replace_test.key;
  replace_read_test.content_salt = 1;

  CACHE_SM(t, large_write_test, { cacheProcessor.open_write(this, &key, CACHE_FRAG_TYPE_NONE, 100, CACHE_WRITE_OPT_SYNC); });
  large_write_test.expect_initial_event = CACHE_EVENT_OPEN_WRITE;
  large_write_test.expect_event = VC_EVENT_WRITE_COMPLETE;
  large_write_test.nbytes = 10000000;
  rand_CacheKey(&large_write_test.key, thread->mutex);

  CACHE_SM(
    t, pread_test, { cacheProcessor.open_read(this, &key); } int open_read_callout() {
      cvio = cache_vc->do_io_pread(this, nbytes, buffer, 7000000);
      return 1;
    });
  pread_test.expect_initial_event = CACHE_EVENT_OPEN_READ;
  pread_test.expect_event = VC_EVENT_READ_COMPLETE;
  pread_test.nbytes = 100;
  pread_test.key = large_write_test.key;

  // clang-format off
  r_sequential(t,
               write_test.clone(),
               lookup_test.clone(),
               r_sequential(t, 10, read_test.clone()) /* run read_test 10 times */,
               remove_test.clone(),
               lookup_fail_test.clone(),
               read_fail_test.clone(),
               remove_fail_test.clone(),
               replace_write_test.clone(),
               replace_test.clone(),
               replace_read_test.clone(),
               large_write_test.clone(),
               pread_test.clone(),
               nullptr)
    ->run(pstatus);
  // clang-format on

  return;
}

void
force_link_CacheTest()
{
}

// run -R 3 -r cache_disk_replacement_stability

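// Measures how stable stripe assignment is under resizing: build the vol hash
// table for 26 equal stripes, grow one of them, rebuild the table, and count
// how many hash slots changed owner. The test reports the churn rather than
// asserting on it, so it is a diagnostic, not a hard check.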
REGRESSION_TEST(cache_disk_replacement_stability)(RegressionTest *t, int level, int *pstatus)
{
  static int const MAX_VOLS = 26; // maximum number of vols used in any test.
  static uint64_t DEFAULT_SKIP = 8192;
  static uint64_t DEFAULT_STRIPE_SIZE = 1024ULL * 1024 * 1024 * 911; // 911G
  CacheDisk disk; // Only need one because it's just checked for failure.
  CacheHostRecord hr1, hr2;
  Vol *sample;
  static int const sample_idx = 16;
  Vol vols[MAX_VOLS];
  Vol *vol_ptrs[MAX_VOLS]; // array of pointers.
  char buff[2048];

  // Only run at the highest levels.
  if (REGRESSION_TEST_EXTENDED > level) {
    *pstatus = REGRESSION_TEST_PASSED;
    return;
  }

  *pstatus = REGRESSION_TEST_INPROGRESS;

  disk.num_errors = 0;

  for (int i = 0; i < MAX_VOLS; ++i) {
    vol_ptrs[i] = vols + i;
    vols[i].disk = &disk;
    vols[i].len = DEFAULT_STRIPE_SIZE;
    snprintf(buff, sizeof(buff), "/dev/sd%c %" PRIu64 ":%" PRIu64, 'a' + i, DEFAULT_SKIP, vols[i].len);
    CryptoContext().hash_immediate(vols[i].hash_id, buff, strlen(buff));
  }

  hr1.vol_hash_table = nullptr;
  hr1.vols = vol_ptrs;
  hr1.num_vols = MAX_VOLS;
  build_vol_hash_table(&hr1);

  hr2.vol_hash_table = nullptr;
  hr2.vols = vol_ptrs;
  hr2.num_vols = MAX_VOLS;

  sample = vols + sample_idx;
  sample->len = 1024ULL * 1024 * 1024 * (1024 + 128); // 1.125 TB
  snprintf(buff, sizeof(buff), "/dev/sd%c %" PRIu64 ":%" PRIu64, 'a' + sample_idx, DEFAULT_SKIP, sample->len);
  CryptoContext().hash_immediate(sample->hash_id, buff, strlen(buff));
  build_vol_hash_table(&hr2);

  // See how much the assignment changed.
  int to = 0, from = 0;
  int then = 0, now = 0;
  for (int i = 0; i < VOL_HASH_TABLE_SIZE; ++i) {
    if (hr1.vol_hash_table[i] == sample_idx) {
      ++then;
    }
    if (hr2.vol_hash_table[i] == sample_idx) {
      ++now;
    }
    if (hr1.vol_hash_table[i] != hr2.vol_hash_table[i]) {
      if (hr1.vol_hash_table[i] == sample_idx) {
        ++from;
      } else {
        ++to;
      }
    }
  }
  rprintf(t,
          "Cache stability difference - "
          "delta = %d of %d : %d to, %d from, originally %d slots, now %d slots (net gain = %d/%d)\n",
          to + from, VOL_HASH_TABLE_SIZE, to, from, then, now, now - then, to - from);
  *pstatus = REGRESSION_TEST_PASSED;

  hr1.vols = nullptr;
  hr2.vols = nullptr;
}

static double zipf_alpha = 1.2;
static int64_t zipf_bucket_size = 1;

#define ZIPF_SIZE (1 << 20)

static double *zipf_table = nullptr;

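// build_zipf() fills zipf_table with a cumulative distribution for a Zipf-like
// law with exponent zipf_alpha: raw weights 1/(i+2)^alpha, then a prefix sum,
// then normalization by the final entry so the table spans [0, 1].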
static void
build_zipf()
{
  if (zipf_table) {
    return;
  }
  zipf_table = static_cast<double *>(ats_malloc(ZIPF_SIZE * sizeof(double)));
  for (int i = 0; i < ZIPF_SIZE; i++) {
    zipf_table[i] = 1.0 / pow(i + 2, zipf_alpha);
  }
  for (int i = 1; i < ZIPF_SIZE; i++) {
    zipf_table[i] = zipf_table[i - 1] + zipf_table[i];
  }
  double x = zipf_table[ZIPF_SIZE - 1];
  for (int i = 0; i < ZIPF_SIZE; i++) {
    zipf_table[i] = zipf_table[i] / x;
  }
}

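// Inverse-transform sampling: binary-search the CDF table for a uniform v in
// [0, 1). The zipf_bucket_size branch nudges the result by interpolating
// between adjacent entries; as written it never runs, since zipf_bucket_size
// is file-static and always 1 here.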
static int
get_zipf(double v)
{
  int l = 0, r = ZIPF_SIZE - 1, m;
  do {
    m = (r + l) / 2;
    if (v < zipf_table[m]) {
      r = m - 1;
    } else {
      l = m + 1;
    }
  } while (l < r);
  if (zipf_bucket_size == 1) {
    return m;
  }
  double x = zipf_table[m], y = zipf_table[m + 1];
  m += static_cast<int>((v - x) / (y - x));
  return m;
}

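// Exercises one RamCache implementation in three phases: (1) seed 200 entries
// with extra hits on the first ten and require those ten to survive, then
// replay a Zipf-distributed request stream with (2) fixed 16K entries and
// (3) variable-size entries, counting misses over the second half of each
// stream. Passing requires both hit rates to reach 0.55 and the final cache
// size to stay within 2% of the nominal size.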
static bool
test_RamCache(RegressionTest *t, RamCache *cache, const char *name, int64_t cache_size)
{
  bool pass = true;
  CacheKey key;
  Vol *vol = theCache->key_to_vol(&key, "example.com", sizeof("example.com") - 1);
  std::vector<Ptr<IOBufferData>> data;

  cache->init(cache_size, vol);

  for (int l = 0; l < 10; l++) {
    for (int i = 0; i < 200; i++) {
      IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
      CryptoHash hash;

      d->alloc(BUFFER_SIZE_INDEX_16K);
      data.push_back(make_ptr(d));
      hash.u64[0] = (static_cast<uint64_t>(i) << 32) + i;
      hash.u64[1] = (static_cast<uint64_t>(i) << 32) + i;
      cache->put(&hash, data[i].get(), 1 << 15);
      // More hits for the first 10.
      for (int j = 0; j <= i && j < 10; j++) {
        Ptr<IOBufferData> data;
        CryptoHash hash;

        hash.u64[0] = (static_cast<uint64_t>(j) << 32) + j;
        hash.u64[1] = (static_cast<uint64_t>(j) << 32) + j;
        cache->get(&hash, &data);
      }
    }
  }

  for (int i = 0; i < 10; i++) {
    CryptoHash hash;
    Ptr<IOBufferData> data;

    hash.u64[0] = (static_cast<uint64_t>(i) << 32) + i;
    hash.u64[1] = (static_cast<uint64_t>(i) << 32) + i;
    if (!cache->get(&hash, &data)) {
      pass = false;
    }
  }

  int sample_size = cache_size >> 6;
  build_zipf();
  srand48(13);
  int *r = static_cast<int *>(ats_malloc(sample_size * sizeof(int)));
  for (int i = 0; i < sample_size; i++) {
    // coverity[dont_call]
    r[i] = get_zipf(drand48());
  }
  data.clear();
  int misses = 0;
  for (int i = 0; i < sample_size; i++) {
    CryptoHash hash;
    hash.u64[0] = (static_cast<uint64_t>(r[i]) << 32) + r[i];
    hash.u64[1] = (static_cast<uint64_t>(r[i]) << 32) + r[i];
    Ptr<IOBufferData> get_data;
    if (!cache->get(&hash, &get_data)) {
      IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
      d->alloc(BUFFER_SIZE_INDEX_16K);
      data.push_back(make_ptr(d));
      cache->put(&hash, data.back().get(), 1 << 15);
      if (i >= sample_size / 2) {
        misses++; // Sample last half of the gets.
      }
    }
  }
  double fixed_hit_rate = 1.0 - ((static_cast<double>(misses)) / (sample_size / 2));
  rprintf(t, "RamCache %s Fixed Size Hit Rate %f\n", name, fixed_hit_rate);

  data.clear();
  misses = 0;
  for (int i = 0; i < sample_size; i++) {
    CryptoHash hash;
    hash.u64[0] = (static_cast<uint64_t>(r[i]) << 32) + r[i];
    hash.u64[1] = (static_cast<uint64_t>(r[i]) << 32) + r[i];
    Ptr<IOBufferData> get_data;
    if (!cache->get(&hash, &get_data)) {
      IOBufferData *d = THREAD_ALLOC(ioDataAllocator, this_thread());
      d->alloc(BUFFER_SIZE_INDEX_8K + (r[i] % 3));
      data.push_back(make_ptr(d));
      cache->put(&hash, data.back().get(), d->block_size());
      if (i >= sample_size / 2) {
        misses++; // Sample last half of the gets.
      }
    }
  }
  double variable_hit_rate = 1.0 - ((static_cast<double>(misses)) / (sample_size / 2));
  rprintf(t, "RamCache %s Variable Size Hit Rate %f\n", name, variable_hit_rate);

  rprintf(t, "RamCache %s Nominal Size %" PRId64 " Size %" PRId64 "\n", name, cache_size, cache->size());

  // Free the request stream before the pass/fail checks so the early returns
  // below do not leak it.
  ats_free(r);

  if (fixed_hit_rate < 0.55 || variable_hit_rate < 0.55) {
    return false;
  }
  // llabs() so the 64-bit difference is not truncated by the int-only abs().
  if (llabs(cache_size - cache->size()) > 0.02 * cache_size) {
    return false;
  }

  rprintf(t, "RamCache %s Test Done\r", name);

  return pass;
}

REGRESSION_TEST(ram_cache)(RegressionTest *t, int level, int *pstatus)
{
  // Run with -R 3 for now to trigger this check, until we figure out the CI.
  if (REGRESSION_TEST_EXTENDED > level) {
    *pstatus = REGRESSION_TEST_PASSED;
    return;
  }

  if (cacheProcessor.IsCacheEnabled() != CACHE_INITIALIZED) {
    rprintf(t, "cache not initialized");
    *pstatus = REGRESSION_TEST_FAILED;
    return;
  }
  // Set PASSED once up front so a failure at any size is not overwritten by a
  // later passing iteration.
  *pstatus = REGRESSION_TEST_PASSED;
  for (int s = 20; s <= 28; s += 4) {
    int64_t cache_size = 1LL << s;
    if (!test_RamCache(t, new_RamCacheLRU(), "LRU", cache_size) || !test_RamCache(t, new_RamCacheCLFUS(), "CLFUS", cache_size)) {
      *pstatus = REGRESSION_TEST_FAILED;
    }
  }
}