1 //===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // Memory quarantine for AddressSanitizer and potentially other tools.
9 // Quarantine caches some specified amount of memory in per-thread caches,
10 // then evicts to global FIFO queue. When the queue reaches specified threshold,
11 // oldest memory is recycled.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef SANITIZER_QUARANTINE_H
16 #define SANITIZER_QUARANTINE_H
17 
18 #include "sanitizer_internal_defs.h"
19 #include "sanitizer_mutex.h"
20 #include "sanitizer_list.h"
21 
22 namespace __sanitizer {
23 
24 template<typename Node> class QuarantineCache;
25 
// A fixed-capacity batch of quarantined pointers. Batches are chained
// into an intrusive singly-linked list through |next|.
struct QuarantineBatch {
  static const uptr kSize = 1024;  // Max pointers per batch.
  QuarantineBatch *next;
  uptr size;   // Total number of quarantined bytes held by this batch.
  uptr count;  // Number of populated slots in |batch|.
  void *batch[kSize];  // Slots at index >= count are uninitialized.
};
33 
// The Callback class must provide the following interface
// (cb is a Callback instance, passed by value):
//   void cb.Recycle(Node *ptr);
//   void *cb.Allocate(uptr size);
//   void cb.Deallocate(void *ptr);
38 template<typename Callback, typename Node>
39 class Quarantine {
40  public:
41   typedef QuarantineCache<Callback> Cache;
42 
Quarantine(LinkerInitialized)43   explicit Quarantine(LinkerInitialized)
44       : cache_(LINKER_INITIALIZED) {
45   }
46 
Init(uptr size,uptr cache_size)47   void Init(uptr size, uptr cache_size) {
48     max_size_ = size;
49     min_size_ = size / 10 * 9;  // 90% of max size.
50     max_cache_size_ = cache_size;
51   }
52 
Put(Cache * c,Callback cb,Node * ptr,uptr size)53   void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
54     c->Enqueue(cb, ptr, size);
55     if (c->Size() > max_cache_size_)
56       Drain(c, cb);
57   }
58 
Drain(Cache * c,Callback cb)59   void NOINLINE Drain(Cache *c, Callback cb) {
60     {
61       SpinMutexLock l(&cache_mutex_);
62       cache_.Transfer(c);
63     }
64     if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
65       Recycle(cb);
66   }
67 
68  private:
69   // Read-only data.
70   char pad0_[kCacheLineSize];
71   uptr max_size_;
72   uptr min_size_;
73   uptr max_cache_size_;
74   char pad1_[kCacheLineSize];
75   SpinMutex cache_mutex_;
76   SpinMutex recycle_mutex_;
77   Cache cache_;
78   char pad2_[kCacheLineSize];
79 
Recycle(Callback cb)80   void NOINLINE Recycle(Callback cb) {
81     Cache tmp;
82     {
83       SpinMutexLock l(&cache_mutex_);
84       while (cache_.Size() > min_size_) {
85         QuarantineBatch *b = cache_.DequeueBatch();
86         tmp.EnqueueBatch(b);
87       }
88     }
89     recycle_mutex_.Unlock();
90     DoRecycle(&tmp, cb);
91   }
92 
DoRecycle(Cache * c,Callback cb)93   void NOINLINE DoRecycle(Cache *c, Callback cb) {
94     while (QuarantineBatch *b = c->DequeueBatch()) {
95       const uptr kPrefetch = 16;
96       for (uptr i = 0; i < kPrefetch; i++)
97         PREFETCH(b->batch[i]);
98       for (uptr i = 0; i < b->count; i++) {
99         PREFETCH(b->batch[i + kPrefetch]);
100         cb.Recycle((Node*)b->batch[i]);
101       }
102       cb.Deallocate(b);
103     }
104   }
105 };
106 
107 // Per-thread cache of memory blocks.
108 template<typename Callback>
109 class QuarantineCache {
110  public:
QuarantineCache(LinkerInitialized)111   explicit QuarantineCache(LinkerInitialized) {
112   }
113 
QuarantineCache()114   QuarantineCache()
115       : size_() {
116     list_.clear();
117   }
118 
Size()119   uptr Size() const {
120     return atomic_load(&size_, memory_order_relaxed);
121   }
122 
Enqueue(Callback cb,void * ptr,uptr size)123   void Enqueue(Callback cb, void *ptr, uptr size) {
124     if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
125       AllocBatch(cb);
126     QuarantineBatch *b = list_.back();
127     b->batch[b->count++] = ptr;
128     b->size += size;
129     SizeAdd(size);
130   }
131 
Transfer(QuarantineCache * c)132   void Transfer(QuarantineCache *c) {
133     list_.append_back(&c->list_);
134     SizeAdd(c->Size());
135     atomic_store(&c->size_, 0, memory_order_relaxed);
136   }
137 
EnqueueBatch(QuarantineBatch * b)138   void EnqueueBatch(QuarantineBatch *b) {
139     list_.push_back(b);
140     SizeAdd(b->size);
141   }
142 
DequeueBatch()143   QuarantineBatch *DequeueBatch() {
144     if (list_.empty())
145       return 0;
146     QuarantineBatch *b = list_.front();
147     list_.pop_front();
148     SizeAdd(-b->size);
149     return b;
150   }
151 
152  private:
153   IntrusiveList<QuarantineBatch> list_;
154   atomic_uintptr_t size_;
155 
SizeAdd(uptr add)156   void SizeAdd(uptr add) {
157     atomic_store(&size_, Size() + add, memory_order_relaxed);
158   }
159 
AllocBatch(Callback cb)160   NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
161     QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
162     b->count = 0;
163     b->size = 0;
164     list_.push_back(b);
165     return b;
166   }
167 };
168 }  // namespace __sanitizer
169 
170 #endif  // #ifndef SANITIZER_QUARANTINE_H
171