1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
#include "rocksdb/cache.h"

#include <algorithm>
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"
22
23 namespace ROCKSDB_NAMESPACE {
24
25 // Conversions between numeric keys/values and the types expected by Cache.
EncodeKey(int k)26 static std::string EncodeKey(int k) {
27 std::string result;
28 PutFixed32(&result, k);
29 return result;
30 }
DecodeKey(const Slice & k)31 static int DecodeKey(const Slice& k) {
32 assert(k.size() == 4);
33 return DecodeFixed32(k.data());
34 }
EncodeValue(uintptr_t v)35 static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
// Read the integer back out of the pointer (the inverse of EncodeValue).
static int DecodeValue(void* v) {
  const uintptr_t raw = reinterpret_cast<uintptr_t>(v);
  return static_cast<int>(raw);
}
39
// Parameter values for the value-parameterized tests below: which cache
// implementation a CacheTest instance exercises.
const std::string kLRU = "lru";
const std::string kClock = "clock";
42
dumbDeleter(const Slice &,void *)43 void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}
44
eraseDeleter(const Slice &,void * value)45 void eraseDeleter(const Slice& /*key*/, void* value) {
46 Cache* cache = reinterpret_cast<Cache*>(value);
47 cache->Erase("foo");
48 }
49
50 class CacheTest : public testing::TestWithParam<std::string> {
51 public:
52 static CacheTest* current_;
53
Deleter(const Slice & key,void * v)54 static void Deleter(const Slice& key, void* v) {
55 current_->deleted_keys_.push_back(DecodeKey(key));
56 current_->deleted_values_.push_back(DecodeValue(v));
57 }
58
59 static const int kCacheSize = 1000;
60 static const int kNumShardBits = 4;
61
62 static const int kCacheSize2 = 100;
63 static const int kNumShardBits2 = 2;
64
65 std::vector<int> deleted_keys_;
66 std::vector<int> deleted_values_;
67 std::shared_ptr<Cache> cache_;
68 std::shared_ptr<Cache> cache2_;
69
CacheTest()70 CacheTest()
71 : cache_(NewCache(kCacheSize, kNumShardBits, false)),
72 cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
73 current_ = this;
74 }
75
~CacheTest()76 ~CacheTest() override {}
77
NewCache(size_t capacity)78 std::shared_ptr<Cache> NewCache(size_t capacity) {
79 auto type = GetParam();
80 if (type == kLRU) {
81 return NewLRUCache(capacity);
82 }
83 if (type == kClock) {
84 return NewClockCache(capacity);
85 }
86 return nullptr;
87 }
88
NewCache(size_t capacity,int num_shard_bits,bool strict_capacity_limit,CacheMetadataChargePolicy charge_policy=kDontChargeCacheMetadata)89 std::shared_ptr<Cache> NewCache(
90 size_t capacity, int num_shard_bits, bool strict_capacity_limit,
91 CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
92 auto type = GetParam();
93 if (type == kLRU) {
94 LRUCacheOptions co;
95 co.capacity = capacity;
96 co.num_shard_bits = num_shard_bits;
97 co.strict_capacity_limit = strict_capacity_limit;
98 co.high_pri_pool_ratio = 0;
99 co.metadata_charge_policy = charge_policy;
100 return NewLRUCache(co);
101 }
102 if (type == kClock) {
103 return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
104 charge_policy);
105 }
106 return nullptr;
107 }
108
Lookup(std::shared_ptr<Cache> cache,int key)109 int Lookup(std::shared_ptr<Cache> cache, int key) {
110 Cache::Handle* handle = cache->Lookup(EncodeKey(key));
111 const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
112 if (handle != nullptr) {
113 cache->Release(handle);
114 }
115 return r;
116 }
117
Insert(std::shared_ptr<Cache> cache,int key,int value,int charge=1)118 void Insert(std::shared_ptr<Cache> cache, int key, int value,
119 int charge = 1) {
120 EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
121 &CacheTest::Deleter));
122 }
123
Erase(std::shared_ptr<Cache> cache,int key)124 void Erase(std::shared_ptr<Cache> cache, int key) {
125 cache->Erase(EncodeKey(key));
126 }
127
Lookup(int key)128 int Lookup(int key) {
129 return Lookup(cache_, key);
130 }
131
Insert(int key,int value,int charge=1)132 void Insert(int key, int value, int charge = 1) {
133 Insert(cache_, key, value, charge);
134 }
135
Erase(int key)136 void Erase(int key) {
137 Erase(cache_, key);
138 }
139
Lookup2(int key)140 int Lookup2(int key) {
141 return Lookup(cache2_, key);
142 }
143
Insert2(int key,int value,int charge=1)144 void Insert2(int key, int value, int charge = 1) {
145 Insert(cache2_, key, value, charge);
146 }
147
Erase2(int key)148 void Erase2(int key) {
149 Erase(cache2_, key);
150 }
151 };
CacheTest* CacheTest::current_;

// Fixture for tests that only make sense for the LRU cache (instantiated
// with kLRU only at the bottom of this file).
class LRUCacheTest : public CacheTest {};
155
// GetUsage() must track the sum of inserted charges exactly for a
// kDontChargeCacheMetadata cache, and strictly exceed it for a
// kFullChargeCacheMetadata cache.
TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    // Metadata charging makes the precise cache report strictly more than
    // the sum of the entry charges.
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}
199
// GetPinnedUsage() must track only the charges of entries with outstanding
// external references, and must be unaffected by eviction pressure or by
// EraseUnRefEntries() (which cannot touch pinned entries).
TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      // Release every other entry: it stays cached but is no longer pinned.
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      // Re-pin some entries via Lookup.
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}
286
// Basic lookup semantics: hits return the stored value, misses return -1,
// and overwriting a key triggers exactly one deletion (of the old value).
TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  // Overwrite key 100: the old value (101) must be deleted.
  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}
309
// Inserting the same key twice must leave the second value visible.
TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}
315
// Erase removes exactly the named entry (running its deleter once) and is a
// harmless no-op on absent keys.
TEST_P(CacheTest, Erase) {
  // Erasing a key that was never inserted must not invoke any deleter.
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  // Erasing the same key again must not delete anything further.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}
334
// An entry with an outstanding handle stays alive (and charged to usage)
// even after being overwritten or erased; its deleter runs only when the
// last handle is released.
TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  // Overwrite while h1 is held: old entry is detached but not yet deleted.
  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  // Releasing h1 finally deletes the overwritten (101) entry.
  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  // Erase while h2 is held: entry becomes invisible but stays charged.
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}
364
// A frequently accessed entry must survive heavy insertion pressure while a
// never-touched entry is evicted.
TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000+i, 2000+i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}
377
// An explicit Ref() keeps an entry in the cache through eviction pressure
// until all external references (Ref + Lookup) have been released.
TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  // With no references left, entry 100 must have been evicted.
  ASSERT_EQ(-1, Lookup(100));
}
403
// Under eviction pressure, entries with outstanding handles (200..203) are
// preserved while unreferenced entries (100..103, 300..303) are evicted.
TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  // Hold handles on keys 200..203 so they cannot be evicted.
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert entries much more than Cache capacity
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted in the beginning
  // are evicted. Ones without extra ref are evicted and
  // those with are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Cleaning up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}
451
TEST_P(CacheTest, EvictEmptyCache) {
  // Inserting an item larger than the capacity exercises the eviction path
  // on a cache that currently holds nothing; it must still succeed.
  auto tiny_cache = NewCache(1, 0, false);
  ASSERT_OK(tiny_cache->Insert("foo", nullptr, 10, dumbDeleter));
}
457
// Erasing "bar" runs eraseDeleter, which re-enters the cache and erases
// "foo"; both entries must end up gone without deadlock or corruption.
TEST_P(CacheTest, EraseFromDeleter) {
  // Have deleter which will erase item from cache, which will re-enter
  // the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  // "bar" carries the cache pointer so its deleter can call Erase("foo").
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}
468
// After Erase, outstanding handles remain valid but the entry is no longer
// reachable via Lookup, even while some handles are still unreleased.
TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}
490
// With mixed-weight entries, the total charge retained in the cache must
// stay near the configured capacity (within 10%).
TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2*kCacheSize) {
    // Alternate light/heavy charges: odd indices are light.
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000+index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000+i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
}
517
TEST_P(CacheTest, NewId) {
  // Two consecutive ids handed out by the cache must differ.
  const uint64_t first = cache_->NewId();
  const uint64_t second = cache_->NewId();
  ASSERT_NE(first, second);
}
523
524
// Simple heap-allocated test value; freed by the `deleter` callback below.
class Value {
 public:
  explicit Value(size_t v) : v_(v) { }

  size_t v_;
};
531
namespace {
// Cache deleter for heap-allocated Value objects.
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value *>(value);
}
}  // namespace
537
// Release(handle, /*force_erase=*/true) on the last reference must erase the
// entry and run its deleter immediately.
TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}
552
// A plain Release (force_erase defaulted to false) must keep the entry
// cached when there is spare capacity, so no deleter runs.
TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that deleter is not called. When cache has free capacity it is
  // not expected to immediately erase the released items.
  ASSERT_EQ(0U, deleted_keys_.size());
}
568
// SetCapacity can grow a cache freely; shrinking evicts only unreferenced
// entries, so usage lands exactly at the new capacity here.
TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // lets create a cache with capacity 5,
  // then, insert 5 elements, then increase capacity
  // to 10, returned capacity should be 10, usage=5
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // insert 5 more elements to cache, then release 5,
  // then decrease capacity to 7, final capacity should be 7
  // and usage should be 7
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}
613
// With strict_capacity_limit on, an insert that would exceed capacity fails
// with Incomplete when a handle is requested, but is accepted (and evicted
// immediately) when no handle is requested.
TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // As if the key was inserted into the cache and then evicted immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}
664
// A non-strict cache may run over capacity while all entries are pinned;
// once handles are released, shrinking via SetCapacity evicts the
// least-recently released entry first.
TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // a LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n+1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest is there
  // This is consistent with the LRU policy since the element 0
  // was released first
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i+1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}
713
714 namespace {
715 std::vector<std::pair<int, int>> legacy_callback_state;
legacy_callback(void * value,size_t charge)716 void legacy_callback(void* value, size_t charge) {
717 legacy_callback_state.push_back(
718 {DecodeValue(value), static_cast<int>(charge)});
719 }
720 };
721
// The legacy ApplyToAllCacheEntries must visit every entry exactly once,
// reporting each (value, charge) pair.
TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  legacy_callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(legacy_callback, true);

  // Iteration order is unspecified, so compare as sorted multisets.
  std::sort(inserted.begin(), inserted.end());
  std::sort(legacy_callback_state.begin(), legacy_callback_state.end());
  ASSERT_EQ(inserted.size(), legacy_callback_state.size());
  for (size_t i = 0; i < inserted.size(); ++i) {
    EXPECT_EQ(inserted[i], legacy_callback_state[i]);
  }
}
739
// ApplyToAllEntries must visit every entry exactly once, reporting key,
// value, charge and the registered deleter.
TEST_P(CacheTest, ApplyToAllEntriesTest) {
  std::vector<std::string> callback_state;
  const auto callback = [&](const Slice& key, void* value, size_t charge,
                            Cache::DeleterFn deleter) {
    callback_state.push_back(ToString(DecodeKey(key)) + "," +
                             ToString(DecodeValue(value)) + "," +
                             ToString(charge));
    assert(deleter == &CacheTest::Deleter);
  };

  std::vector<std::string> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(ToString(i) + "," + ToString(i * 2) + "," +
                       ToString(i + 1));
  }
  cache_->ApplyToAllEntries(callback, /*opts*/ {});

  // Iteration order is unspecified, so compare as sorted multisets.
  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_EQ(inserted.size(), callback_state.size());
  for (size_t i = 0; i < inserted.size(); ++i) {
    EXPECT_EQ(inserted[i], callback_state[i]);
  }
}
767
TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
  // This is a mini-stress test of ApplyToAllEntries, to ensure
  // items in the cache that are neither added nor removed
  // during ApplyToAllEntries are counted exactly once.

  // Insert some entries that we expect to be seen exactly once
  // during iteration.
  constexpr int kSpecialCharge = 2;
  constexpr int kNotSpecialCharge = 1;
  constexpr int kSpecialCount = 100;
  for (int i = 0; i < kSpecialCount; ++i) {
    Insert(i, i * 2, kSpecialCharge);
  }

  // For callback
  int special_count = 0;
  const auto callback = [&](const Slice&, void*, size_t charge,
                            Cache::DeleterFn) {
    // Special entries are distinguished solely by their charge.
    if (charge == static_cast<size_t>(kSpecialCharge)) {
      ++special_count;
    }
  };

  // Start counting
  std::thread apply_thread([&]() {
    // Use small average_entries_per_lock to make the problem difficult
    Cache::ApplyToAllEntriesOptions opts;
    opts.average_entries_per_lock = 2;
    cache_->ApplyToAllEntries(callback, opts);
  });

  // In parallel, add more entries, enough to cause resize but not enough
  // to cause ejections
  for (int i = kSpecialCount * 1; i < kSpecialCount * 6; ++i) {
    Insert(i, i * 2, kNotSpecialCharge);
  }

  apply_thread.join();
  // Every special entry must have been counted exactly once.
  ASSERT_EQ(special_count, kSpecialCount);
}
808
TEST_P(CacheTest, DefaultShardBits) {
  // When num_shard_bits is left at its default (-1), the shard count is
  // derived from the capacity: 16MB -> 5 bits, <512KB -> 0 bits,
  // 1GB -> 6 bits. (The previous comment here was copy-pasted from the
  // strict-capacity-limit test and did not describe this test.)
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  // Guard the downcast: a failed dynamic_cast would otherwise be a null
  // dereference below rather than a clean test failure.
  ASSERT_NE(nullptr, sc);
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_NE(nullptr, sc);
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_NE(nullptr, sc);
  ASSERT_EQ(6, sc->GetNumShardBits());
}
824
// A handle must expose the entry's charge and the deleter registered at
// insertion time.
TEST_P(CacheTest, GetChargeAndDeleter) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  ASSERT_EQ(&CacheTest::Deleter, cache_->GetDeleter(h1));
  cache_->Release(h1);
}
833
834 #ifdef SUPPORT_CLOCK_CACHE
835 std::shared_ptr<Cache> (*new_clock_cache_func)(
836 size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
837 INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
838 testing::Values(kLRU, kClock));
839 #else
840 INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
841 #endif // SUPPORT_CLOCK_CACHE
842 INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));
843
844 } // namespace ROCKSDB_NAMESPACE
845
// Standard gtest entry point: parse gtest flags, then run all tests.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
850