//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).

#include "cache/lru_cache.h"

#include <string>
#include <vector>

#include "db/db_test_util.h"
#include "file/sst_file_manager_impl.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/cache.h"
#include "rocksdb/io_status.h"
#include "rocksdb/sst_file_manager.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

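// Tests that exercise a single LRUCacheShard directly. NewCache() constructs
// the shard with placement new in cacheline-aligned storage, so DeleteCache()
// must destroy and free it manually.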
class LRUCacheTest : public testing::Test {
 public:
  LRUCacheTest() {}
  ~LRUCacheTest() override { DeleteCache(); }

  void DeleteCache() {
    if (cache_ != nullptr) {
      cache_->~LRUCacheShard();
      port::cacheline_aligned_free(cache_);
      cache_ = nullptr;
    }
  }

  void NewCache(size_t capacity, double high_pri_pool_ratio = 0.0,
                bool use_adaptive_mutex = kDefaultToAdaptiveMutex) {
    DeleteCache();
    cache_ = reinterpret_cast<LRUCacheShard*>(
        port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
    new (cache_) LRUCacheShard(
        capacity, false /*strict_capacity_limit*/, high_pri_pool_ratio,
        use_adaptive_mutex, kDontChargeCacheMetadata,
        24 /*max_upper_hash_bits*/, nullptr /*secondary_cache*/);
  }

  void Insert(const std::string& key,
              Cache::Priority priority = Cache::Priority::LOW) {
    EXPECT_OK(cache_->Insert(key, 0 /*hash*/, nullptr /*value*/, 1 /*charge*/,
                             nullptr /*deleter*/, nullptr /*handle*/,
                             priority));
  }

  void Insert(char key, Cache::Priority priority = Cache::Priority::LOW) {
    Insert(std::string(1, key), priority);
  }

  bool Lookup(const std::string& key) {
    auto handle = cache_->Lookup(key, 0 /*hash*/);
    if (handle) {
      cache_->Release(handle);
      return true;
    }
    return false;
  }

  bool Lookup(char key) { return Lookup(std::string(1, key)); }

  void Erase(const std::string& key) { cache_->Erase(key, 0 /*hash*/); }

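  // Walks the shard's internal LRU list from the least-recently-used entry
  // towards the most-recently-used one and checks that the keys appear in
  // exactly the given order, that entries past the low-pri/high-pri boundary
  // report InHighPriPool(), and that the high-pri pool holds exactly
  // num_high_pri_pool_keys entries.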
  void ValidateLRUList(std::vector<std::string> keys,
                       size_t num_high_pri_pool_keys = 0) {
    LRUHandle* lru;
    LRUHandle* lru_low_pri;
    cache_->TEST_GetLRUList(&lru, &lru_low_pri);
    LRUHandle* iter = lru;
    bool in_high_pri_pool = false;
    size_t high_pri_pool_keys = 0;
    if (iter == lru_low_pri) {
      in_high_pri_pool = true;
    }
    for (const auto& key : keys) {
      iter = iter->next;
      ASSERT_NE(lru, iter);
      ASSERT_EQ(key, iter->key().ToString());
      ASSERT_EQ(in_high_pri_pool, iter->InHighPriPool());
      if (in_high_pri_pool) {
        high_pri_pool_keys++;
      }
      if (iter == lru_low_pri) {
        ASSERT_FALSE(in_high_pri_pool);
        in_high_pri_pool = true;
      }
    }
    ASSERT_EQ(lru, iter->next);
    ASSERT_TRUE(in_high_pri_pool);
    ASSERT_EQ(num_high_pri_pool_keys, high_pri_pool_keys);
  }

 private:
  LRUCacheShard* cache_ = nullptr;
};

TEST_F(LRUCacheTest, BasicLRU) {
  NewCache(5);
  for (char ch = 'a'; ch <= 'e'; ch++) {
    Insert(ch);
  }
  ValidateLRUList({"a", "b", "c", "d", "e"});
  for (char ch = 'x'; ch <= 'z'; ch++) {
    Insert(ch);
  }
  ValidateLRUList({"d", "e", "x", "y", "z"});
  ASSERT_FALSE(Lookup("b"));
  ValidateLRUList({"d", "e", "x", "y", "z"});
  ASSERT_TRUE(Lookup("e"));
  ValidateLRUList({"d", "x", "y", "z", "e"});
  ASSERT_TRUE(Lookup("z"));
  ValidateLRUList({"d", "x", "y", "e", "z"});
  Erase("x");
  ValidateLRUList({"d", "y", "e", "z"});
  ASSERT_TRUE(Lookup("d"));
  ValidateLRUList({"y", "e", "z", "d"});
  Insert("u");
  ValidateLRUList({"y", "e", "z", "d", "u"});
  Insert("v");
  ValidateLRUList({"e", "z", "d", "u", "v"});
}

TEST_F(LRUCacheTest, MidpointInsertion) {
  // Allocate 2 cache entries to the high-pri pool.
  NewCache(5, 0.45);

  Insert("a", Cache::Priority::LOW);
  Insert("b", Cache::Priority::LOW);
  Insert("c", Cache::Priority::LOW);
  Insert("x", Cache::Priority::HIGH);
  Insert("y", Cache::Priority::HIGH);
  ValidateLRUList({"a", "b", "c", "x", "y"}, 2);

  // Low-pri entries are inserted at the tail of the low-pri list (the
  // midpoint). After a lookup, the entry moves to the tail of the full list.
  Insert("d", Cache::Priority::LOW);
  ValidateLRUList({"b", "c", "d", "x", "y"}, 2);
  ASSERT_TRUE(Lookup("d"));
  ValidateLRUList({"b", "c", "x", "y", "d"}, 2);

  // High-pri entries are inserted at the tail of the full list.
  Insert("z", Cache::Priority::HIGH);
  ValidateLRUList({"c", "x", "y", "d", "z"}, 2);
}

TEST_F(LRUCacheTest, EntriesWithPriority) {
  // Allocate 2 cache entries to the high-pri pool.
  NewCache(5, 0.45);

  Insert("a", Cache::Priority::LOW);
  Insert("b", Cache::Priority::LOW);
  Insert("c", Cache::Priority::LOW);
  ValidateLRUList({"a", "b", "c"}, 0);

  // Low-pri entries can take high-pri pool capacity if available.
  Insert("u", Cache::Priority::LOW);
  Insert("v", Cache::Priority::LOW);
  ValidateLRUList({"a", "b", "c", "u", "v"}, 0);

  Insert("X", Cache::Priority::HIGH);
  Insert("Y", Cache::Priority::HIGH);
  ValidateLRUList({"c", "u", "v", "X", "Y"}, 2);

  // High-pri entries can overflow to the low-pri pool.
  Insert("Z", Cache::Priority::HIGH);
  ValidateLRUList({"u", "v", "X", "Y", "Z"}, 2);

  // Low-pri entries will be inserted to the head of the low-pri pool.
  Insert("a", Cache::Priority::LOW);
  ValidateLRUList({"v", "X", "a", "Y", "Z"}, 2);

  // Low-pri entries will be inserted to the head of the high-pri pool after a
  // lookup.
  ASSERT_TRUE(Lookup("v"));
  ValidateLRUList({"X", "a", "Y", "Z", "v"}, 2);

  // High-pri entries will be inserted to the head of the list after a lookup.
  ASSERT_TRUE(Lookup("X"));
  ValidateLRUList({"a", "Y", "Z", "v", "X"}, 2);
  ASSERT_TRUE(Lookup("Z"));
  ValidateLRUList({"a", "Y", "v", "X", "Z"}, 2);

  Erase("Y");
  ValidateLRUList({"a", "v", "X", "Z"}, 2);
  Erase("X");
  ValidateLRUList({"a", "v", "Z"}, 1);
  Insert("d", Cache::Priority::LOW);
  Insert("e", Cache::Priority::LOW);
  ValidateLRUList({"a", "v", "d", "e", "Z"}, 1);
  Insert("f", Cache::Priority::LOW);
  Insert("g", Cache::Priority::LOW);
  ValidateLRUList({"d", "e", "f", "g", "Z"}, 1);
  ASSERT_TRUE(Lookup("d"));
  ValidateLRUList({"e", "f", "g", "Z", "d"}, 2);
}

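// A toy SecondaryCache backed by an in-memory LRUCache. Values are persisted
// through the CacheItemHelper callbacks into a flat buffer prefixed with the
// serialized size, and materialized again via the CreateCallback on Lookup.
// It counts inserts and lookups so the tests can assert on promotion and
// demotion behavior, and it can be told to fail insertions on demand.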
class TestSecondaryCache : public SecondaryCache {
 public:
  explicit TestSecondaryCache(size_t capacity)
      : num_inserts_(0), num_lookups_(0), inject_failure_(false) {
    cache_ = NewLRUCache(capacity, 0, false, 0.5, nullptr,
                         kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  }
  ~TestSecondaryCache() override { cache_.reset(); }

  std::string Name() override { return "TestSecondaryCache"; }

  void InjectFailure() { inject_failure_ = true; }

  void ResetInjectFailure() { inject_failure_ = false; }

  Status Insert(const Slice& key, void* value,
                const Cache::CacheItemHelper* helper) override {
    if (inject_failure_) {
      return Status::Corruption("Insertion Data Corrupted");
    }
    size_t size;
    char* buf;
    Status s;

    num_inserts_++;
    size = (*helper->size_cb)(value);
    buf = new char[size + sizeof(uint64_t)];
    EncodeFixed64(buf, size);
    s = (*helper->saveto_cb)(value, 0, size, buf + sizeof(uint64_t));
    if (!s.ok()) {
      delete[] buf;
      return s;
    }
    return cache_->Insert(key, buf, size,
                          [](const Slice& /*key*/, void* val) -> void {
                            delete[] static_cast<char*>(val);
                          });
  }

  std::unique_ptr<SecondaryCacheHandle> Lookup(
      const Slice& key, const Cache::CreateCallback& create_cb,
      bool /*wait*/) override {
    std::unique_ptr<SecondaryCacheHandle> secondary_handle;
    Cache::Handle* handle = cache_->Lookup(key);
    num_lookups_++;
    if (handle) {
      void* value;
      size_t charge;
      char* ptr = (char*)cache_->Value(handle);
      size_t size = DecodeFixed64(ptr);
      ptr += sizeof(uint64_t);
      Status s = create_cb(ptr, size, &value, &charge);
      if (s.ok()) {
        secondary_handle.reset(
            new TestSecondaryCacheHandle(cache_.get(), handle, value, charge));
      } else {
        cache_->Release(handle);
      }
    }
    return secondary_handle;
  }

  void Erase(const Slice& /*key*/) override {}

  void WaitAll(std::vector<SecondaryCacheHandle*> /*handles*/) override {}

  std::string GetPrintableOptions() const override { return ""; }

  uint32_t num_inserts() { return num_inserts_; }

  uint32_t num_lookups() { return num_lookups_; }

 private:
  class TestSecondaryCacheHandle : public SecondaryCacheHandle {
   public:
    TestSecondaryCacheHandle(Cache* cache, Cache::Handle* handle, void* value,
                             size_t size)
        : cache_(cache), handle_(handle), value_(value), size_(size) {}
    ~TestSecondaryCacheHandle() override { cache_->Release(handle_); }

    bool IsReady() override { return true; }

    void Wait() override {}

    void* Value() override { return value_; }

    size_t Size() override { return size_; }

   private:
    Cache* cache_;
    Cache::Handle* handle_;
    void* value_;
    size_t size_;
  };

  std::shared_ptr<Cache> cache_;
  uint32_t num_inserts_;
  uint32_t num_lookups_;
  bool inject_failure_;
};

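// DB-level tests that exercise the secondary cache through the block cache
// of a real RocksDB instance.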
class DBSecondaryCacheTest : public DBTestBase {
 public:
  DBSecondaryCacheTest()
      : DBTestBase("/db_secondary_cache_test", /*env_do_fsync=*/true) {}
};

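// Fixture for testing an LRUCache backed by a TestSecondaryCache. TestItem is
// the in-memory object held by the primary cache; helper_ serializes and
// deletes it, while helper_fail_ uses a save callback that always fails so
// demotion to the secondary cache cannot succeed. test_item_creator rebuilds
// a TestItem from the serialized bytes and can be made to fail via
// SetFailCreate().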
class LRUSecondaryCacheTest : public LRUCacheTest {
 public:
  LRUSecondaryCacheTest() : fail_create_(false) {}
  ~LRUSecondaryCacheTest() {}

 protected:
  class TestItem {
   public:
    TestItem(const char* buf, size_t size) : buf_(new char[size]), size_(size) {
      memcpy(buf_.get(), buf, size);
    }
    ~TestItem() {}

    char* Buf() { return buf_.get(); }
    size_t Size() { return size_; }

   private:
    std::unique_ptr<char[]> buf_;
    size_t size_;
  };

  static size_t SizeCallback(void* obj) {
    return reinterpret_cast<TestItem*>(obj)->Size();
  }

  static Status SaveToCallback(void* from_obj, size_t from_offset,
                               size_t length, void* out) {
    TestItem* item = reinterpret_cast<TestItem*>(from_obj);
    char* buf = item->Buf();
    EXPECT_EQ(length, item->Size());
    EXPECT_EQ(from_offset, 0);
    memcpy(out, buf, length);
    return Status::OK();
  }

  static void DeletionCallback(const Slice& /*key*/, void* obj) {
    delete reinterpret_cast<TestItem*>(obj);
  }

  static Cache::CacheItemHelper helper_;

  static Status SaveToCallbackFail(void* /*obj*/, size_t /*offset*/,
                                   size_t /*size*/, void* /*out*/) {
    return Status::NotSupported();
  }

  static Cache::CacheItemHelper helper_fail_;

  Cache::CreateCallback test_item_creator =
      [&](void* buf, size_t size, void** out_obj, size_t* charge) -> Status {
    if (fail_create_) {
      return Status::NotSupported();
    }
    *out_obj = reinterpret_cast<void*>(new TestItem((char*)buf, size));
    *charge = size;
    return Status::OK();
  };

  void SetFailCreate(bool fail) { fail_create_ = fail; }

 private:
  bool fail_create_;
};

Cache::CacheItemHelper LRUSecondaryCacheTest::helper_(
    LRUSecondaryCacheTest::SizeCallback, LRUSecondaryCacheTest::SaveToCallback,
    LRUSecondaryCacheTest::DeletionCallback);

Cache::CacheItemHelper LRUSecondaryCacheTest::helper_fail_(
    LRUSecondaryCacheTest::SizeCallback,
    LRUSecondaryCacheTest::SaveToCallbackFail,
    LRUSecondaryCacheTest::DeletionCallback);

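// Exercises the basic demote-on-evict and promote-on-lookup flow between the
// primary LRU cache and the secondary cache.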
TEST_F(LRUSecondaryCacheTest, BasicTest) {
  LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache =
      std::make_shared<TestSecondaryCache>(2048);
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);

  Random rnd(301);
  std::string str1 = rnd.RandomString(1020);
  TestItem* item1 = new TestItem(str1.data(), str1.length());
  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
                          str1.length()));
  std::string str2 = rnd.RandomString(1020);
  TestItem* item2 = new TestItem(str2.data(), str2.length());
  // Inserting k2 should evict k1, so k1 should be demoted to NVM
  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
                          str2.length()));

  Cache::Handle* handle;
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  // This lookup should promote k1 and demote k2
  handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 1u);

  cache.reset();
  secondary_cache.reset();
}

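// Insertion without a CacheItemHelper should fail, and lookups of a key that
// is in neither cache should return nullptr.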
TEST_F(LRUSecondaryCacheTest, BasicFailTest) {
  LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache =
      std::make_shared<TestSecondaryCache>(2048);
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);

  Random rnd(301);
  std::string str1 = rnd.RandomString(1020);
  TestItem* item1 = new TestItem(str1.data(), str1.length());
  ASSERT_NOK(cache->Insert("k1", item1, nullptr, str1.length()));
  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
                          str1.length()));

  Cache::Handle* handle;
  handle = cache->Lookup("k2", nullptr, test_item_creator, Cache::Priority::LOW,
                         true);
  ASSERT_EQ(handle, nullptr);
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, false);
  ASSERT_EQ(handle, nullptr);

  cache.reset();
  secondary_cache.reset();
}

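// With a save callback that always fails, evicted entries cannot be demoted,
// so a later lookup of k1 misses both the primary and the secondary cache.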
TEST_F(LRUSecondaryCacheTest, SaveFailTest) {
  LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache =
      std::make_shared<TestSecondaryCache>(2048);
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);

  Random rnd(301);
  std::string str1 = rnd.RandomString(1020);
  TestItem* item1 = new TestItem(str1.data(), str1.length());
  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_fail_,
                          str1.length()));
  std::string str2 = rnd.RandomString(1020);
  TestItem* item2 = new TestItem(str2.data(), str2.length());
  // k1 should be demoted to NVM
  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_fail_,
                          str2.length()));

  Cache::Handle* handle;
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  // This lookup should fail, since k1 demotion would have failed
  handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_fail_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_EQ(handle, nullptr);
  // Since k1 didn't get promoted, k2 should still be in cache
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_fail_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 1u);

  cache.reset();
  secondary_cache.reset();
}

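// With the create callback failing, a value found in the secondary cache
// cannot be materialized, so the lookup of k1 returns nullptr while k2 stays
// cached.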
TEST_F(LRUSecondaryCacheTest, CreateFailTest) {
  LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache =
      std::make_shared<TestSecondaryCache>(2048);
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);

  Random rnd(301);
  std::string str1 = rnd.RandomString(1020);
  TestItem* item1 = new TestItem(str1.data(), str1.length());
  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
                          str1.length()));
  std::string str2 = rnd.RandomString(1020);
  TestItem* item2 = new TestItem(str2.data(), str2.length());
  // k1 should be demoted to NVM
  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
                          str2.length()));

  Cache::Handle* handle;
  SetFailCreate(true);
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  // This lookup should fail, since k1 creation would have failed
  handle = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_EQ(handle, nullptr);
  // Since k1 didn't get promoted, k2 should still be in cache
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 1u);

  cache.reset();
  secondary_cache.reset();
}

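// With strict_capacity_limit set and k2 pinned in the primary cache,
// promoting k1 from the secondary cache fails and its lookup returns nullptr.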
TEST_F(LRUSecondaryCacheTest, FullCapacityTest) {
  LRUCacheOptions opts(1024, 0, /*_strict_capacity_limit=*/true, 0.5, nullptr,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache =
      std::make_shared<TestSecondaryCache>(2048);
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);

  Random rnd(301);
  std::string str1 = rnd.RandomString(1020);
  TestItem* item1 = new TestItem(str1.data(), str1.length());
  ASSERT_OK(cache->Insert("k1", item1, &LRUSecondaryCacheTest::helper_,
                          str1.length()));
  std::string str2 = rnd.RandomString(1020);
  TestItem* item2 = new TestItem(str2.data(), str2.length());
  // k1 should be demoted to NVM
  ASSERT_OK(cache->Insert("k2", item2, &LRUSecondaryCacheTest::helper_,
                          str2.length()));

  Cache::Handle* handle;
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  // This lookup should fail, since k1 promotion would have failed due to
  // the block cache being at capacity
  Cache::Handle* handle2;
  handle2 = cache->Lookup("k1", &LRUSecondaryCacheTest::helper_,
                          test_item_creator, Cache::Priority::LOW, true);
  ASSERT_EQ(handle2, nullptr);
  // Since k1 didn't get promoted, k2 should still be in cache
  cache->Release(handle);
  handle = cache->Lookup("k2", &LRUSecondaryCacheTest::helper_,
                         test_item_creator, Cache::Priority::LOW, true);
  ASSERT_NE(handle, nullptr);
  cache->Release(handle);
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 1u);

  cache.reset();
  secondary_cache.reset();
}

// In this test, the block cache size is set to 4096. After inserting 6
// KV-pairs and flushing, there are 5 blocks in this SST file: 2 data blocks
// and 3 meta blocks. block_1 size is 4096 and block_2 size is 2056. The total
// size of the meta blocks is about 900 to 1000. Therefore, in any situation,
// trying to insert block_1 into the block cache will always fail. Only
// block_2 can be successfully inserted into the block cache.
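// (Roughly: 6 values of ~1007 bytes plus their keys come to about 6 KB, which
// with a 4 KB block_size splits into one full ~4 KB data block and one ~2 KB
// data block, matching the sizes quoted above.)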
TEST_F(DBSecondaryCacheTest, TestSecondaryCacheCorrectness1) {
  LRUCacheOptions opts(4 * 1024, 0, false, 0.5, nullptr,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache(
      new TestSecondaryCache(2048 * 1024));
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  table_options.block_size = 4 * 1024;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  // Enable paranoid file checks, so after flush the new file is read back
  // and all of its blocks are accessed.
  options.paranoid_file_checks = true;
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 6;
  for (int i = 0; i < N; i++) {
    std::string p_v = rnd.RandomString(1007);
    ASSERT_OK(Put(Key(i), p_v));
  }

  ASSERT_OK(Flush());
  // After the Flush succeeds, RocksDB does the paranoid check on the new
  // SST file. Meta blocks are always cached in the block cache and they
  // will not be evicted. When block_2 misses the cache and is read out, it is
  // inserted into the block cache. Note that block_1 is never successfully
  // inserted into the block cache. There are 2 lookups in the secondary
  // cache, for block_1 and block_2.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  Compact("a", "z");
  // Compaction creates an iterator to scan the whole file, so all the
  // blocks are needed. Meta blocks are always cached. When block_1 is read
  // out, block_2 is evicted from the block cache and inserted into the
  // secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 3u);

  std::string v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // The first data block is not in the cache, so this triggers a block cache
  // lookup and a secondary cache lookup for block_1. But block_1 will not
  // be inserted successfully due to its size. Currently, the cache only has
  // the meta blocks.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 4u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // The second data block is not in the cache, so this triggers a block cache
  // lookup and a secondary cache lookup for block_2, and block_2 is found
  // in the secondary cache. Now the block cache has block_2.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // block_2 is in the block cache, so this is a block cache hit. No need to
  // look up or insert into the secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // Look up the first data block. It is not in the block cache, so the
  // secondary cache is looked up; it is not there either. After the Get,
  // block_1 still will not be cached.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 6u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // Look up the first data block. It is not in the block cache, so the
  // secondary cache is looked up; it is not there either. After the Get,
  // block_1 still will not be cached.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 7u);

  Destroy(options);
}

// In this test, the block cache size is set to 6100. After inserting 6
// KV-pairs and flushing, there are 5 blocks in this SST file: 2 data blocks
// and 3 meta blocks. block_1 size is 4096 and block_2 size is 2056. The total
// size of the meta blocks is about 900 to 1000. Therefore, we can successfully
// insert and cache block_1 in the block cache (this is what differs from
// TestSecondaryCacheCorrectness1).
TEST_F(DBSecondaryCacheTest, TestSecondaryCacheCorrectness2) {
  LRUCacheOptions opts(6100, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache(
      new TestSecondaryCache(2048 * 1024));
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  table_options.block_size = 4 * 1024;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  options.paranoid_file_checks = true;
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 6;
  for (int i = 0; i < N; i++) {
    std::string p_v = rnd.RandomString(1007);
    ASSERT_OK(Put(Key(i), p_v));
  }

  ASSERT_OK(Flush());
  // After the Flush succeeds, RocksDB does the paranoid check on the new
  // SST file. Meta blocks are always cached in the block cache and they
  // will not be evicted. When block_2 misses the cache and is read out, it is
  // inserted into the block cache. Therefore, block_1 is evicted from the
  // block cache and successfully inserted into the secondary cache. There
  // are 2 lookups in the secondary cache, for block_1 and block_2.
  ASSERT_EQ(secondary_cache->num_inserts(), 1u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  Compact("a", "z");
  // Compaction creates an iterator to scan the whole file, so all the
  // blocks are needed. After the Flush, only block_2 is cached in the block
  // cache and block_1 is in the secondary cache. So when block_1 is read, it
  // is read out from the secondary cache and inserted into the block cache.
  // At the same time, block_2 is inserted into the secondary cache. Now the
  // secondary cache has both block_1 and block_2. After compaction, block_1
  // is in the cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 3u);

  std::string v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // This Get needs to access block_1. Since block_1 is cached in the block
  // cache, there is no secondary cache lookup.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 3u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // This Get needs to access block_2, which is not in the block cache. So
  // it looks up the secondary cache for block_2 and caches it in the
  // block cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 4u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // This Get needs to access block_2, which is already in the block cache.
  // No need to look up the secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 4u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // This Get needs to access block_1. Since block_1 is not in the block
  // cache, there is one secondary cache lookup. Then block_1 is cached in
  // the block cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // This Get needs to access block_1. Since block_1 is cached in the block
  // cache, there is no secondary cache lookup.
  ASSERT_EQ(secondary_cache->num_inserts(), 2u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  Destroy(options);
}

// The block cache size is set to 1024*1024. After inserting 6 KV-pairs
// and flushing, there are 5 blocks in this SST file: 2 data blocks and 3 meta
// blocks. block_1 size is 4096 and block_2 size is 2056. The total size
// of the meta blocks is about 900 to 1000. Therefore, we can successfully
// cache all the blocks in the block cache and there is no secondary cache
// insertion. 2 lookups are needed for the blocks.
TEST_F(DBSecondaryCacheTest, NoSecondaryCacheInsertion) {
  LRUCacheOptions opts(1024 * 1024, 0, false, 0.5, nullptr,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache(
      new TestSecondaryCache(2048 * 1024));
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  table_options.block_size = 4 * 1024;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.paranoid_file_checks = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 6;
  for (int i = 0; i < N; i++) {
    std::string p_v = rnd.RandomString(1000);
    ASSERT_OK(Put(Key(i), p_v));
  }

  ASSERT_OK(Flush());
  // After the Flush succeeds, RocksDB does the paranoid check on the new
  // SST file. Meta blocks are always cached in the block cache and they
  // will not be evicted. Now the block cache is large enough to cache
  // both block_1 and block_2. The first time block_1 and block_2 are read,
  // there are cache misses, so 2 secondary cache lookups are needed for
  // the 2 blocks.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  Compact("a", "z");
  // Compaction iterates the whole SST file. Since all the data blocks
  // are in the block cache, there is no need to look up the secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  std::string v = Get(Key(0));
  ASSERT_EQ(1000, v.size());
  // Since the block cache is large enough, all the blocks are cached. We
  // do not need to look up the secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  Destroy(options);
}

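// Stress the interaction between the block cache and the secondary cache with
// 256 keys and 1000 random Gets. The exact counts depend on eviction order,
// so the test only checks that some insertions and lookups happened.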
TEST_F(DBSecondaryCacheTest, SecondaryCacheIntensiveTesting) {
  LRUCacheOptions opts(8 * 1024, 0, false, 0.5, nullptr,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache(
      new TestSecondaryCache(2048 * 1024));
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  table_options.block_size = 4 * 1024;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 256;
  for (int i = 0; i < N; i++) {
    std::string p_v = rnd.RandomString(1000);
    ASSERT_OK(Put(Key(i), p_v));
  }
  ASSERT_OK(Flush());
  Compact("a", "z");

  Random r_index(47);
  std::string v;
  for (int i = 0; i < 1000; i++) {
    uint32_t key_i = r_index.Next() % N;
    v = Get(Key(key_i));
  }

  // With over 200 data blocks, there will be multiple insertions
  // and lookups.
  ASSERT_GE(secondary_cache->num_inserts(), 1u);
  ASSERT_GE(secondary_cache->num_lookups(), 1u);

  Destroy(options);
}

// In this test, the block cache size is set to 4096. After inserting 6
// KV-pairs and flushing, there are 5 blocks in this SST file: 2 data blocks
// and 3 meta blocks. block_1 size is 4096 and block_2 size is 2056. The total
// size of the meta blocks is about 900 to 1000. Therefore, in any situation,
// trying to insert block_1 into the block cache will always fail. Only
// block_2 can be successfully inserted into the block cache.
TEST_F(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
  LRUCacheOptions opts(4 * 1024, 0, false, 0.5, nullptr,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  std::shared_ptr<TestSecondaryCache> secondary_cache(
      new TestSecondaryCache(2048 * 1024));
  opts.secondary_cache = secondary_cache;
  std::shared_ptr<Cache> cache = NewLRUCache(opts);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  table_options.block_size = 4 * 1024;
  Options options = GetDefaultOptions();
  options.create_if_missing = true;
  options.paranoid_file_checks = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);
  Random rnd(301);
  const int N = 6;
  for (int i = 0; i < N; i++) {
    std::string p_v = rnd.RandomString(1007);
    ASSERT_OK(Put(Key(i), p_v));
  }

  ASSERT_OK(Flush());
  // After the Flush succeeds, RocksDB does the paranoid check on the new
  // SST file. Meta blocks are always cached in the block cache and they
  // will not be evicted. When block_2 misses the cache and is read out, it is
  // inserted into the block cache. Note that block_1 is never successfully
  // inserted into the block cache. There are 2 lookups in the secondary
  // cache, for block_1 and block_2.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 2u);

  // Fail the insertion; in the LRU cache, the secondary insertion's returned
  // status is not checked, so the DB will not be affected.
  secondary_cache->InjectFailure();
  Compact("a", "z");
  // Compaction creates an iterator to scan the whole file, so all the
  // blocks are needed. Meta blocks are always cached. When block_1 is read
  // out, block_2 is evicted from the block cache and its insertion into the
  // secondary cache is attempted, but the injected failure rejects it.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 3u);

  std::string v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // The first data block is not in the cache, so this triggers a block cache
  // lookup and a secondary cache lookup for block_1. But block_1 will not
  // be inserted successfully due to its size. Currently, the cache only has
  // the meta blocks.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 4u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // The second data block is not in the cache, so this triggers a block cache
  // lookup and a secondary cache lookup for block_2. Since the secondary
  // cache insertion was failed, block_2 is read from the file and then
  // cached in the block cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  v = Get(Key(5));
  ASSERT_EQ(1007, v.size());
  // block_2 is in the block cache, so this is a block cache hit. No need to
  // look up or insert into the secondary cache.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 5u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // Look up the first data block. It is not in the block cache, so the
  // secondary cache is looked up; it is not there either. After the Get,
  // block_1 still will not be cached.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 6u);

  v = Get(Key(0));
  ASSERT_EQ(1007, v.size());
  // Look up the first data block. It is not in the block cache, so the
  // secondary cache is looked up; it is not there either. After the Get,
  // block_1 still will not be cached.
  ASSERT_EQ(secondary_cache->num_inserts(), 0u);
  ASSERT_EQ(secondary_cache->num_lookups(), 7u);
  secondary_cache->ResetInjectFailure();

  Destroy(options);
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}