//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.

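// For illustration only: a hypothetical Params struct showing how this
// template is typically wired up. AP32 and the constants below are an
// example, not part of this header; the referenced types
// (DefaultSizeClassMap, FlatByteMap, NoOpMapUnmapCallback) come from
// sanitizer_allocator.h.
//
//   struct AP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<(kSpaceSize >> kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<AP32> Allocator32;
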
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
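  // The second check holds because batch_ stores kMaxNumCachedHint - 2
  // pointers, while count_ and next account for the remaining two words (on
  // 32-bit targets sizeof(void *) == sizeof(uptr)).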
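  // The batch class is special: its chunks are the TransferBatch objects
  // themselves, so its chunk size is sizeof(TransferBatch) rather than a
  // SizeClassMap size.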
  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

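  // Per the Region diagram above, metadata is laid out back-to-front at the
  // end of the region: the chunk at index n (counting from the region start)
  // keeps its metadata at region_end - (n + 1) * kMetadataSize.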
  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

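  // Batches move wholesale between the per-thread caches and these central
  // per-class free lists; the cache `c` is only consulted when a fresh
  // region has to be carved up (see PopulateFreeList).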
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty() &&
        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
      return nullptr;
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

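  // Unlike SizeClassAllocator64, chunks of one size class are spread over
  // many regions, so there is no single region begin to report; callers
  // always get 0.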
  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

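  // All bookkeeping (possible_regions and size_class_info_array) lives inside
  // this object, so no extra address space needs to be reserved.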
  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
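    // Pad to exactly one cache line (the mutex is budgeted one uptr); the
    // COMPILER_CHECK below enforces the size.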
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, "SizeClassAllocator32"));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

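  // Carves a freshly mapped region into `size`-sized chunks and links them
  // into TransferBatches on the class's free list. The batch header is
  // obtained via c->CreateBatch, which may place it inside the first chunk
  // it describes when the size class permits.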
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    if (UNLIKELY(!reg))
      return false;
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    CHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};