//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/lru_cache.h"

#include <cassert>
#include <cstdint>
#include <cstdio>

#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

LRUHandleTable::LRUHandleTable(int max_upper_hash_bits)
    : length_bits_(/* historical starting size*/ 4),
      list_(new LRUHandle* [size_t{1} << length_bits_] {}),
      elems_(0),
      max_length_bits_(max_upper_hash_bits) {}

LRUHandleTable::~LRUHandleTable() {
  ApplyToEntriesRange(
      [](LRUHandle* h) {
        if (!h->HasRefs()) {
          h->Free();
        }
      },
      0, uint32_t{1} << length_bits_);
}

LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
  return *FindPointer(key, hash);
}

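// Insert "h" into the hash table. If an entry with the same key and hash
// already exists, it is replaced and returned so the caller can release it;
// otherwise nullptr is returned. The table is grown when the average linked
// list length would exceed 1.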
LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
  LRUHandle** ptr = FindPointer(h->key(), h->hash);
  LRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;
  if (old == nullptr) {
    ++elems_;
    if ((elems_ >> length_bits_) > 0) {  // elems_ >= length
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}

LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = FindPointer(key, hash);
  LRUHandle* result = *ptr;
  if (result != nullptr) {
    *ptr = result->next_hash;
    --elems_;
  }
  return result;
}

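// Return a pointer to the slot that points to a cache entry matching
// key/hash. If there is no such cache entry, return a pointer to the
// trailing slot in the corresponding linked list.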
LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}

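// Double the number of buckets and redistribute the existing entries.
// Buckets are indexed by the upper-most hash bits, so each old bucket
// splits into two adjacent new buckets.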
void LRUHandleTable::Resize() {
  if (length_bits_ >= max_length_bits_) {
    // Due to reaching limit of hash information, if we made the table
    // bigger, we would allocate more addresses but only the same
    // number would be used.
    return;
  }
  if (length_bits_ >= 31) {
    // Avoid undefined behavior shifting uint32_t by 32
    return;
  }

  uint32_t old_length = uint32_t{1} << length_bits_;
  int new_length_bits = length_bits_ + 1;
  std::unique_ptr<LRUHandle* []> new_list {
    new LRUHandle* [size_t{1} << new_length_bits] {}
  };
  uint32_t count = 0;
  for (uint32_t i = 0; i < old_length; i++) {
    LRUHandle* h = list_[i];
    while (h != nullptr) {
      LRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  assert(elems_ == count);
  list_ = std::move(new_list);
  length_bits_ = new_length_bits;
}

LRUCacheShard::LRUCacheShard(
    size_t capacity, bool strict_capacity_limit, double high_pri_pool_ratio,
    bool use_adaptive_mutex, CacheMetadataChargePolicy metadata_charge_policy,
    int max_upper_hash_bits,
    const std::shared_ptr<SecondaryCache>& secondary_cache)
    : capacity_(0),
      high_pri_pool_usage_(0),
      strict_capacity_limit_(strict_capacity_limit),
      high_pri_pool_ratio_(high_pri_pool_ratio),
      high_pri_pool_capacity_(0),
      table_(max_upper_hash_bits),
      usage_(0),
      lru_usage_(0),
      mutex_(use_adaptive_mutex),
      secondary_cache_(secondary_cache) {
  set_metadata_charge_policy(metadata_charge_policy);
  // Make empty circular linked list
  lru_.next = &lru_;
  lru_.prev = &lru_;
  lru_low_pri_ = &lru_;
  SetCapacity(capacity);
}

void LRUCacheShard::EraseUnRefEntries() {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    while (lru_.next != &lru_) {
      LRUHandle* old = lru_.next;
      // LRU list contains only elements which can be evicted
      assert(old->InCache() && !old->HasRefs());
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      size_t total_charge = old->CalcTotalCharge(metadata_charge_policy_);
      assert(usage_ >= total_charge);
      usage_ -= total_charge;
      last_reference_list.push_back(old);
    }
  }

  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

void LRUCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  // The state is essentially going to be the starting hash, which works
  // nicely even if we resize between calls because we use upper-most
  // hash bits for table indexes.
  MutexLock l(&mutex_);
  uint32_t length_bits = table_.GetLengthBits();
  uint32_t length = uint32_t{1} << length_bits;

  assert(average_entries_per_lock > 0);
  // Assuming we are called with same average_entries_per_lock repeatedly,
  // this simplifies some logic (index_end will not overflow)
  assert(average_entries_per_lock < length || *state == 0);

  uint32_t index_begin = *state >> (32 - length_bits);
  uint32_t index_end = index_begin + average_entries_per_lock;
  if (index_end >= length) {
    // Going to end
    index_end = length;
    *state = UINT32_MAX;
  } else {
    *state = index_end << (32 - length_bits);
  }

  table_.ApplyToEntriesRange(
      [callback](LRUHandle* h) {
        DeleterFn deleter = h->IsSecondaryCacheCompatible()
                                ? h->info_.helper->del_cb
                                : h->info_.deleter;
        callback(h->key(), h->value, h->charge, deleter);
      },
      index_begin, index_end);
}

void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri) {
  MutexLock l(&mutex_);
  *lru = &lru_;
  *lru_low_pri = lru_low_pri_;
}

size_t LRUCacheShard::TEST_GetLRUSize() {
  MutexLock l(&mutex_);
  LRUHandle* lru_handle = lru_.next;
  size_t lru_size = 0;
  while (lru_handle != &lru_) {
    lru_size++;
    lru_handle = lru_handle->next;
  }
  return lru_size;
}

double LRUCacheShard::GetHighPriPoolRatio() {
  MutexLock l(&mutex_);
  return high_pri_pool_ratio_;
}

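// Unlink "e" from the LRU list and subtract its charge from lru_usage_
// (and from high_pri_pool_usage_ if it was in the high-priority pool).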
void LRUCacheShard::LRU_Remove(LRUHandle* e) {
  assert(e->next != nullptr);
  assert(e->prev != nullptr);
  if (lru_low_pri_ == e) {
    lru_low_pri_ = e->prev;
  }
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = nullptr;
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  assert(lru_usage_ >= total_charge);
  lru_usage_ -= total_charge;
  if (e->InHighPriPool()) {
    assert(high_pri_pool_usage_ >= total_charge);
    high_pri_pool_usage_ -= total_charge;
  }
}

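// Link "e" into the LRU list: entries that are high priority or have been
// hit go to the head of the list (the high-pri pool, when one is
// configured); everything else goes to the head of the low-pri pool.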
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
    // Insert "e" at the head of the LRU list.
    e->next = &lru_;
    e->prev = lru_.prev;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(true);
    high_pri_pool_usage_ += total_charge;
    MaintainPoolSize();
  } else {
    // Insert "e" to the head of low-pri pool. Note that when
    // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
    e->next = lru_low_pri_->next;
    e->prev = lru_low_pri_;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
    lru_low_pri_ = e;
  }
  lru_usage_ += total_charge;
}

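// Move entries from the tail of the high-pri pool into the low-pri pool
// until the high-pri pool usage fits within its capacity.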
void LRUCacheShard::MaintainPoolSize() {
  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
    // Overflow last entry in high-pri pool to low-pri pool.
    lru_low_pri_ = lru_low_pri_->next;
    assert(lru_low_pri_ != &lru_);
    lru_low_pri_->SetInHighPriPool(false);
    size_t total_charge =
        lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
    assert(high_pri_pool_usage_ >= total_charge);
    high_pri_pool_usage_ -= total_charge;
  }
}

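// Evict unreferenced entries from the LRU list, oldest first, until
// usage_ + charge fits within capacity_ or the list is empty. Evicted
// handles are appended to "deleted" so they can be freed outside the mutex.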
void LRUCacheShard::EvictFromLRU(size_t charge,
                                 autovector<LRUHandle*>* deleted) {
  while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    // LRU list contains only elements which can be evicted
    assert(old->InCache() && !old->HasRefs());
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    size_t old_total_charge = old->CalcTotalCharge(metadata_charge_policy_);
    assert(usage_ >= old_total_charge);
    usage_ -= old_total_charge;
    deleted->push_back(old);
  }
}

void LRUCacheShard::SetCapacity(size_t capacity) {
  autovector<LRUHandle*> last_reference_list;
  {
    MutexLock l(&mutex_);
    capacity_ = capacity;
    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
    EvictFromLRU(0, &last_reference_list);
  }

  // Try to insert the evicted entries into the tiered cache
  // Free the entries outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
        !entry->IsPromoted()) {
      secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
          .PermitUncheckedError();
    }
    entry->Free();
  }
}

void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  MutexLock l(&mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}

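// Insert "e" into the cache, evicting unreferenced entries as needed. If
// the entry still does not fit: with strict_capacity_limit_ and a requested
// handle, "e" is deleted and Status::Incomplete is returned; with no
// requested handle, "e" is freed but OK is returned, as if it had been
// inserted and immediately evicted. Overwriting an existing key returns
// Status::OkOverwritten.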
Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle) {
  Status s = Status::OK();
  autovector<LRUHandle*> last_reference_list;
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);

  {
    MutexLock l(&mutex_);

    // Free the space following strict LRU policy until enough space
    // is freed or the lru list is empty
    EvictFromLRU(total_charge, &last_reference_list);

    if ((usage_ + total_charge) > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry were
        // inserted into the cache and evicted immediately.
        e->SetInCache(false);
        last_reference_list.push_back(e);
      } else {
        delete[] reinterpret_cast<char*>(e);
        *handle = nullptr;
        s = Status::Incomplete("Insert failed due to LRU cache being full.");
      }
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      LRUHandle* old = table_.Insert(e);
      usage_ += total_charge;
      if (old != nullptr) {
        s = Status::OkOverwritten();
        assert(old->InCache());
        old->SetInCache(false);
        if (!old->HasRefs()) {
          // old is on LRU because it's in cache and its reference count is 0
          LRU_Remove(old);
          size_t old_total_charge =
              old->CalcTotalCharge(metadata_charge_policy_);
          assert(usage_ >= old_total_charge);
          usage_ -= old_total_charge;
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        e->Ref();
        *handle = reinterpret_cast<Cache::Handle*>(e);
      }
    }
  }

  // Try to insert the evicted entries into the secondary cache
  // Free the entries here outside of mutex for performance reasons
  for (auto entry : last_reference_list) {
    if (secondary_cache_ && entry->IsSecondaryCacheCompatible() &&
        !entry->IsPromoted()) {
      secondary_cache_->Insert(entry->key(), entry->value, entry->info_.helper)
          .PermitUncheckedError();
    }
    entry->Free();
  }

  return s;
}

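// Look up "key" in the hash table and return a referenced handle, or
// nullptr. On a miss, if a secondary cache is configured and the caller
// passed a compatible helper, try to promote the value from the secondary
// cache into this shard (synchronous lookups only for now).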
Cache::Handle* LRUCacheShard::Lookup(
    const Slice& key, uint32_t hash,
    const ShardedCache::CacheItemHelper* helper,
    const ShardedCache::CreateCallback& create_cb, Cache::Priority priority,
    bool wait) {
  LRUHandle* e = nullptr;
  {
    MutexLock l(&mutex_);
    e = table_.Lookup(key, hash);
    if (e != nullptr) {
      assert(e->InCache());
      if (!e->HasRefs()) {
        // The entry is in the LRU list since it's in the hash table and has
        // no external references
        LRU_Remove(e);
      }
      e->Ref();
      e->SetHit();
    }
  }

  // If the hash table lookup failed and we're going to look up in the
  // secondary cache, allocate a handle outside the mutex.
  // Only support synchronous lookups for now.
  // TODO: Support asynchronous lookup in secondary cache
  if (!e && secondary_cache_ && helper && helper->saveto_cb && wait) {
    // For objects from the secondary cache, we expect the caller to provide
    // a way to create/delete the primary cache object. The only case where
    // a deleter would not be required is for dummy entries inserted for
    // accounting purposes, which we won't demote to the secondary cache
    // anyway.
    assert(create_cb && helper->del_cb);
    std::unique_ptr<SecondaryCacheHandle> secondary_handle =
        secondary_cache_->Lookup(key, create_cb, wait);
    if (secondary_handle != nullptr) {
      void* value = nullptr;
      e = reinterpret_cast<LRUHandle*>(
          new char[sizeof(LRUHandle) - 1 + key.size()]);

      e->flags = 0;
      e->SetPromoted(true);
      e->SetSecondaryCacheCompatible(true);
      e->info_.helper = helper;
      e->key_length = key.size();
      e->hash = hash;
      e->refs = 0;
      e->next = e->prev = nullptr;
      e->SetInCache(true);
      e->SetPriority(priority);
      memcpy(e->key_data, key.data(), key.size());

      value = secondary_handle->Value();
      e->value = value;
      e->charge = secondary_handle->Size();

      // This call could nullify e if the cache is over capacity and
      // strict_capacity_limit_ is true. In such a case, the caller will try
      // to insert later, which might again fail, but that's ok as this
      // should not be common.
      // Being conservative here since there could be lookups that are
      // actually ok to fail rather than succeed and bloat up the memory
      // usage (preloading partitioned index blocks, for example).
      Status s = InsertItem(e, reinterpret_cast<Cache::Handle**>(&e));
      if (!s.ok()) {
        assert(e == nullptr);
        (*helper->del_cb)(key, value);
      }
    }
  }
  return reinterpret_cast<Cache::Handle*>(e);
}

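// Add a reference to an entry that is already externally referenced.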
bool LRUCacheShard::Ref(Cache::Handle* h) {
  LRUHandle* e = reinterpret_cast<LRUHandle*>(h);
  MutexLock l(&mutex_);
  // To create another reference - entry must be already externally referenced
  assert(e->HasRefs());
  e->Ref();
  return true;
}

void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
  MutexLock l(&mutex_);
  high_pri_pool_ratio_ = high_pri_pool_ratio;
  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
  MaintainPoolSize();
}

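// Drop one reference to "handle". When that was the last reference: if the
// entry is still in the cache and there is room, it goes back on the LRU
// list; otherwise (force_erase, over capacity, or already erased) it is
// removed from the table if needed and freed outside the mutex. Returns
// true if the entry was freed.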
bool LRUCacheShard::Release(Cache::Handle* handle, bool force_erase) {
  if (handle == nullptr) {
    return false;
  }
  LRUHandle* e = reinterpret_cast<LRUHandle*>(handle);
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    last_reference = e->Unref();
    if (last_reference && e->InCache()) {
      // The item is still in cache, and nobody else holds a reference to it
      if (usage_ > capacity_ || force_erase) {
        // The LRU list must be empty since the cache is full
        assert(lru_.next == &lru_ || force_erase);
        // Take this opportunity and remove the item
        table_.Remove(e->key(), e->hash);
        e->SetInCache(false);
      } else {
        // Put the item back on the LRU list, and don't free it
        LRU_Insert(e);
        last_reference = false;
      }
    }
    if (last_reference) {
      size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
      assert(usage_ >= total_charge);
      usage_ -= total_charge;
    }
  }

  // Free the entry here outside of mutex for performance reasons
  if (last_reference) {
    e->Free();
  }
  return last_reference;
}

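// Allocate an LRUHandle for the key/value pair and insert it via
// InsertItem(). "helper" takes precedence over "deleter" and marks the
// entry as secondary-cache compatible.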
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                             size_t charge,
                             void (*deleter)(const Slice& key, void* value),
                             const Cache::CacheItemHelper* helper,
                             Cache::Handle** handle, Cache::Priority priority) {
  // Allocate the memory here outside of the mutex
  // If the cache is full, we'll have to release it
  // It shouldn't happen very often though.
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      new char[sizeof(LRUHandle) - 1 + key.size()]);

  e->value = value;
  e->flags = 0;
  if (helper) {
    e->SetSecondaryCacheCompatible(true);
    e->info_.helper = helper;
  } else {
    e->info_.deleter = deleter;
  }
  e->charge = charge;
  e->key_length = key.size();
  e->hash = hash;
  e->refs = 0;
  e->next = e->prev = nullptr;
  e->SetInCache(true);
  e->SetPriority(priority);
  memcpy(e->key_data, key.data(), key.size());

  return InsertItem(e, handle);
}

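// Remove the entry for "key" from the table. If nothing else holds a
// reference, also unlink it from the LRU list and free it outside the mutex.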
void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
  LRUHandle* e;
  bool last_reference = false;
  {
    MutexLock l(&mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      assert(e->InCache());
      e->SetInCache(false);
      if (!e->HasRefs()) {
        // The entry is in the LRU list since it's in the hash table and has
        // no external references
        LRU_Remove(e);
        size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
        assert(usage_ >= total_charge);
        usage_ -= total_charge;
        last_reference = true;
      }
    }
  }

  // Free the entry here outside of mutex for performance reasons
  // last_reference will only be true if e != nullptr
  if (last_reference) {
    e->Free();
  }
}

size_t LRUCacheShard::GetUsage() const {
  MutexLock l(&mutex_);
  return usage_;
}

size_t LRUCacheShard::GetPinnedUsage() const {
  MutexLock l(&mutex_);
  assert(usage_ >= lru_usage_);
  return usage_ - lru_usage_;
}

std::string LRUCacheShard::GetPrintableOptions() const {
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    MutexLock l(&mutex_);
    snprintf(buffer, kBufferSize, "    high_pri_pool_ratio: %.3lf\n",
             high_pri_pool_ratio_);
  }
  return std::string(buffer);
}

LRUCache::LRUCache(size_t capacity, int num_shard_bits,
                   bool strict_capacity_limit, double high_pri_pool_ratio,
                   std::shared_ptr<MemoryAllocator> allocator,
                   bool use_adaptive_mutex,
                   CacheMetadataChargePolicy metadata_charge_policy,
                   const std::shared_ptr<SecondaryCache>& secondary_cache)
    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
                   std::move(allocator)) {
  num_shards_ = 1 << num_shard_bits;
  shards_ = reinterpret_cast<LRUCacheShard*>(
      port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    new (&shards_[i]) LRUCacheShard(
        per_shard, strict_capacity_limit, high_pri_pool_ratio,
        use_adaptive_mutex, metadata_charge_policy,
        /* max_upper_hash_bits */ 32 - num_shard_bits, secondary_cache);
  }
}

LRUCache::~LRUCache() {
  if (shards_ != nullptr) {
    assert(num_shards_ > 0);
    for (int i = 0; i < num_shards_; i++) {
      shards_[i].~LRUCacheShard();
    }
    port::cacheline_aligned_free(shards_);
  }
}

CacheShard* LRUCache::GetShard(uint32_t shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

const CacheShard* LRUCache::GetShard(uint32_t shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

void* LRUCache::Value(Handle* handle) {
  return reinterpret_cast<const LRUHandle*>(handle)->value;
}

size_t LRUCache::GetCharge(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->charge;
}

Cache::DeleterFn LRUCache::GetDeleter(Handle* handle) const {
  auto h = reinterpret_cast<const LRUHandle*>(handle);
  if (h->IsSecondaryCacheCompatible()) {
    return h->info_.helper->del_cb;
  } else {
    return h->info_.deleter;
  }
}

uint32_t LRUCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const LRUHandle*>(handle)->hash;
}

void LRUCache::DisownData() {
// Do not drop data if compiled with ASAN to suppress leak warning.
#ifndef MUST_FREE_HEAP_ALLOCATIONS
  shards_ = nullptr;
  num_shards_ = 0;
#endif
}

size_t LRUCache::TEST_GetLRUSize() {
  size_t lru_size_of_all_shards = 0;
  for (int i = 0; i < num_shards_; i++) {
    lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
  }
  return lru_size_of_all_shards;
}

double LRUCache::GetHighPriPoolRatio() {
  double result = 0.0;
  if (num_shards_ > 0) {
    result = shards_[0].GetHighPriPoolRatio();
  }
  return result;
}

std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    double high_pri_pool_ratio,
    std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
    CacheMetadataChargePolicy metadata_charge_policy,
    const std::shared_ptr<SecondaryCache>& secondary_cache) {
  if (num_shard_bits >= 20) {
    return nullptr;  // the cache cannot be sharded into too many fine pieces
  }
  if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
    // invalid high_pri_pool_ratio
    return nullptr;
  }
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<LRUCache>(
      capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
      std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
      secondary_cache);
}

std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
  return NewLRUCache(
      cache_opts.capacity, cache_opts.num_shard_bits,
      cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
      cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
      cache_opts.metadata_charge_policy, cache_opts.secondary_cache);
}

std::shared_ptr<Cache> NewLRUCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    double high_pri_pool_ratio,
    std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
    CacheMetadataChargePolicy metadata_charge_policy) {
  return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
                     high_pri_pool_ratio, memory_allocator, use_adaptive_mutex,
                     metadata_charge_policy, nullptr);
}
}  // namespace ROCKSDB_NAMESPACE