1 //===-- sanitizer_allocator_test.cpp --------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 // Tests for sanitizer_allocator.h.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator.h"
14 #include "sanitizer_common/sanitizer_allocator_internal.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 
17 #include "sanitizer_test_utils.h"
18 #include "sanitizer_pthread_wrappers.h"
19 
20 #include "gtest/gtest.h"
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <algorithm>
25 #include <vector>
26 #include <random>
27 #include <set>
28 
29 using namespace __sanitizer;
30 
31 // Too slow for debug build
32 #if !SANITIZER_DEBUG
33 
34 #if SANITIZER_CAN_USE_ALLOCATOR64
35 #if SANITIZER_WINDOWS
36 // On Windows 64-bit there is no easy way to find a large enough fixed address
37 // space that is always available. Thus, a dynamically allocated address space
38 // is used instead, which is requested by setting kSpaceBeg to ~(uptr)0.
39 static const vaddr kAllocatorSpace = ~(uptr)0;
40 static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512GB.
41 static const u64 kAddressSpaceSize = 1ULL << 47;
42 typedef DefaultSizeClassMap SizeClassMap;
43 #elif SANITIZER_ANDROID && defined(__aarch64__)
44 static const vaddr kAllocatorSpace = 0x3000000000ULL;
45 static const uptr kAllocatorSize  = 0x2000000000ULL;
46 static const u64 kAddressSpaceSize = 1ULL << 39;
47 typedef VeryCompactSizeClassMap SizeClassMap;
48 #else
49 static const vaddr kAllocatorSpace = 0x700000000000ULL;
50 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
51 static const u64 kAddressSpaceSize = 1ULL << 47;
52 typedef DefaultSizeClassMap SizeClassMap;
53 #endif
54 
55 template <typename AddressSpaceViewTy>
56 struct AP64 {  // Allocator Params. Short name for shorter demangled names.
57   static const vaddr kSpaceBeg = kAllocatorSpace;
58   static const usize kSpaceSize = kAllocatorSize;
59   static const usize kMetadataSize = 16;
60   typedef ::SizeClassMap SizeClassMap;
61   typedef NoOpMapUnmapCallback MapUnmapCallback;
62   static const usize kFlags = 0;
63   using AddressSpaceView = AddressSpaceViewTy;
64 };
65 
66 template <typename AddressSpaceViewTy>
67 struct AP64Dyn {
68   static const vaddr kSpaceBeg = ~(uptr)0;
69   static const usize kSpaceSize = kAllocatorSize;
70   static const usize kMetadataSize = 16;
71   typedef ::SizeClassMap SizeClassMap;
72   typedef NoOpMapUnmapCallback MapUnmapCallback;
73   static const usize kFlags = 0;
74   using AddressSpaceView = AddressSpaceViewTy;
75 };
76 
77 template <typename AddressSpaceViewTy>
78 struct AP64Compact {
79   static const vaddr kSpaceBeg = ~(uptr)0;
80   static const usize kSpaceSize = kAllocatorSize;
81   static const usize kMetadataSize = 16;
82   typedef CompactSizeClassMap SizeClassMap;
83   typedef NoOpMapUnmapCallback MapUnmapCallback;
84   static const usize kFlags = 0;
85   using AddressSpaceView = AddressSpaceViewTy;
86 };
87 
88 template <typename AddressSpaceViewTy>
89 struct AP64VeryCompact {
90   static const vaddr kSpaceBeg = ~(uptr)0;
91   static const usize kSpaceSize = 1ULL << 37;
92   static const usize kMetadataSize = 16;
93   typedef VeryCompactSizeClassMap SizeClassMap;
94   typedef NoOpMapUnmapCallback MapUnmapCallback;
95   static const usize kFlags = 0;
96   using AddressSpaceView = AddressSpaceViewTy;
97 };
98 
99 template <typename AddressSpaceViewTy>
100 struct AP64Dense {
101   static const vaddr kSpaceBeg = kAllocatorSpace;
102   static const usize kSpaceSize = kAllocatorSize;
103   static const usize kMetadataSize = 16;
104   typedef DenseSizeClassMap SizeClassMap;
105   typedef NoOpMapUnmapCallback MapUnmapCallback;
106   static const usize kFlags = 0;
107   using AddressSpaceView = AddressSpaceViewTy;
108 };
109 
110 template <typename AddressSpaceView>
111 using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
112 using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
113 
114 template <typename AddressSpaceView>
115 using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
116 using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
117 
118 template <typename AddressSpaceView>
119 using Allocator64CompactASVT =
120     SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
121 using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
122 
123 template <typename AddressSpaceView>
124 using Allocator64VeryCompactASVT =
125     SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
126 using Allocator64VeryCompact =
127     Allocator64VeryCompactASVT<LocalAddressSpaceView>;
128 
129 template <typename AddressSpaceView>
130 using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
131 using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
132 
133 #elif defined(__mips64)
134 static const u64 kAddressSpaceSize = 1ULL << 40;
135 #elif defined(__aarch64__)
136 static const u64 kAddressSpaceSize = 1ULL << 39;
137 #elif defined(__s390x__)
138 static const u64 kAddressSpaceSize = 1ULL << 53;
139 #elif defined(__s390__)
140 static const u64 kAddressSpaceSize = 1ULL << 31;
141 #else
142 static const u64 kAddressSpaceSize = 1ULL << 32;
143 #endif
144 
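// Region size used by the SizeClassAllocator32 tests below: 1M regions on
// 32-bit hosts, 16M regions on 64-bit hosts.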
145 static const usize kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
146 
147 template <typename AddressSpaceViewTy>
148 struct AP32Compact {
149   static const vaddr kSpaceBeg = 0;
150   static const u64 kSpaceSize = kAddressSpaceSize;
151   static const usize kMetadataSize = 16;
152   typedef CompactSizeClassMap SizeClassMap;
153   static const usize kRegionSizeLog = ::kRegionSizeLog;
154   using AddressSpaceView = AddressSpaceViewTy;
155   typedef NoOpMapUnmapCallback MapUnmapCallback;
156   static const usize kFlags = 0;
157 };
158 template <typename AddressSpaceView>
159 using Allocator32CompactASVT =
160     SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
161 using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
162 
163 template <class SizeClassMap>
164 void TestSizeClassMap() {
165   typedef SizeClassMap SCMap;
166   SCMap::Print();
167   SCMap::Validate();
168 }
169 
170 TEST(SanitizerCommon, DefaultSizeClassMap) {
171   TestSizeClassMap<DefaultSizeClassMap>();
172 }
173 
174 TEST(SanitizerCommon, CompactSizeClassMap) {
175   TestSizeClassMap<CompactSizeClassMap>();
176 }
177 
178 TEST(SanitizerCommon, VeryCompactSizeClassMap) {
179   TestSizeClassMap<VeryCompactSizeClassMap>();
180 }
181 
182 TEST(SanitizerCommon, InternalSizeClassMap) {
183   TestSizeClassMap<InternalSizeClassMap>();
184 }
185 
186 TEST(SanitizerCommon, DenseSizeClassMap) {
187   TestSizeClassMap<DenseSizeClassMap>();
188 }
189 
190 template <class Allocator>
191 void TestSizeClassAllocator() {
192   Allocator *a = new Allocator;
193   a->Init(kReleaseToOSIntervalNever);
194   typename Allocator::AllocatorCache cache;
195   memset(&cache, 0, sizeof(cache));
196   cache.Init(0);
197 
198   static const uptr sizes[] = {
199     1, 16,  30, 40, 100, 1000, 10000,
200     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
201   };
202 
203   std::vector<void *> allocated;
204 
205   uptr last_total_allocated = 0;
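  // Run the allocate/deallocate cycle a few times; after the first round the
  // total memory used by the allocator is expected to stay constant.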
206   for (int i = 0; i < 3; i++) {
207     // Allocate a bunch of chunks.
208     for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
209       uptr size = sizes[s];
210       if (!a->CanAllocate(size, 1)) continue;
211       // printf("s = %ld\n", size);
212       uptr n_iter = std::max((uptr)6, 4000000 / size);
213       // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
214       for (uptr i = 0; i < n_iter; i++) {
215         uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
216         char *x = (char*)cache.Allocate(a, class_id0);
217         x[0] = 0;
218         x[size - 1] = 0;
219         x[size / 2] = 0;
220         allocated.push_back(x);
221         CHECK_EQ(x, a->GetBlockBegin(x));
222         CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
223         CHECK(a->PointerIsMine(x));
224         CHECK(a->PointerIsMine(x + size - 1));
225         CHECK(a->PointerIsMine(x + size / 2));
226         CHECK_GE(a->GetActuallyAllocatedSize(x), size);
227         uptr class_id = a->GetSizeClass(x);
228         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
229         uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
230         metadata[0] = reinterpret_cast<uptr>(x) + 1;
231         metadata[1] = 0xABCD;
232       }
233     }
234     // Deallocate all.
235     for (uptr i = 0; i < allocated.size(); i++) {
236       void *x = allocated[i];
237       uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
238       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
239       CHECK_EQ(metadata[1], 0xABCD);
240       cache.Deallocate(a, a->GetSizeClass(x), x);
241     }
242     allocated.clear();
243     uptr total_allocated = a->TotalMemoryUsed();
244     if (last_total_allocated == 0)
245       last_total_allocated = total_allocated;
246     CHECK_EQ(last_total_allocated, total_allocated);
247   }
248 
249   // Check that GetBlockBegin never crashes.
250   for (uptr x = 0, step = kAddressSpaceSize / 100000;
251        x < kAddressSpaceSize - step; x += step)
252     if (a->PointerIsMine(reinterpret_cast<void *>(x)))
253       Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
254 
255   a->TestOnlyUnmap();
256   delete a;
257 }
258 
259 #if SANITIZER_CAN_USE_ALLOCATOR64
260 // These tests can fail on Windows if memory is somewhat full and lit happens
261 // to run them all at the same time. FIXME: Make them not flaky and reenable.
262 #if !SANITIZER_WINDOWS
263 TEST(SanitizerCommon, SizeClassAllocator64) {
264   TestSizeClassAllocator<Allocator64>();
265 }
266 
267 TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
268   TestSizeClassAllocator<Allocator64Dynamic>();
269 }
270 
271 #if !SANITIZER_ANDROID
272 // FIXME(kostyak): find values so that these tests work on Android as well.
273 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
274   TestSizeClassAllocator<Allocator64Compact>();
275 }
276 
277 TEST(SanitizerCommon, SizeClassAllocator64Dense) {
278   TestSizeClassAllocator<Allocator64Dense>();
279 }
280 #endif
281 
282 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
283   TestSizeClassAllocator<Allocator64VeryCompact>();
284 }
285 #endif
286 #endif
287 
288 TEST(SanitizerCommon, SizeClassAllocator32Compact) {
289   TestSizeClassAllocator<Allocator32Compact>();
290 }
291 
292 template <typename AddressSpaceViewTy>
293 struct AP32SeparateBatches {
294   static const vaddr kSpaceBeg = 0;
295   static const u64 kSpaceSize = kAddressSpaceSize;
296   static const usize kMetadataSize = 16;
297   typedef DefaultSizeClassMap SizeClassMap;
298   static const usize kRegionSizeLog = ::kRegionSizeLog;
299   using AddressSpaceView = AddressSpaceViewTy;
300   typedef NoOpMapUnmapCallback MapUnmapCallback;
301   static const usize kFlags =
302       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
303 };
304 template <typename AddressSpaceView>
305 using Allocator32SeparateBatchesASVT =
306     SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
307 using Allocator32SeparateBatches =
308     Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
309 
310 TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
311   TestSizeClassAllocator<Allocator32SeparateBatches>();
312 }
313 
314 template <class Allocator>
315 void SizeClassAllocatorMetadataStress() {
316   Allocator *a = new Allocator;
317   a->Init(kReleaseToOSIntervalNever);
318   typename Allocator::AllocatorCache cache;
319   memset(&cache, 0, sizeof(cache));
320   cache.Init(0);
321 
322   const uptr kNumAllocs = 1 << 13;
323   void *allocated[kNumAllocs];
324   void *meta[kNumAllocs];
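  // Allocate one chunk per iteration, cycling through all size classes, and
  // remember the metadata pointer returned for each chunk.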
325   for (uptr i = 0; i < kNumAllocs; i++) {
326     void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
327     allocated[i] = x;
328     meta[i] = a->GetMetaData(x);
329   }
330   // Get Metadata kNumAllocs^2 times.
331   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
332     uptr idx = i % kNumAllocs;
333     void *m = a->GetMetaData(allocated[idx]);
334     EXPECT_EQ(m, meta[idx]);
335   }
336   for (uptr i = 0; i < kNumAllocs; i++) {
337     cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
338   }
339 
340   a->TestOnlyUnmap();
341   delete a;
342 }
343 
344 #if SANITIZER_CAN_USE_ALLOCATOR64
345 // These tests can fail on Windows if memory is somewhat full and lit happens
346 // to run them all at the same time. FIXME: Make them not flaky and reenable.
347 #if !SANITIZER_WINDOWS
348 TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
349   SizeClassAllocatorMetadataStress<Allocator64>();
350 }
351 
352 TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
353   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
354 }
355 
356 #if !SANITIZER_ANDROID
357 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
358   SizeClassAllocatorMetadataStress<Allocator64Compact>();
359 }
360 #endif
361 
362 #endif
363 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
364 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
365   SizeClassAllocatorMetadataStress<Allocator32Compact>();
366 }
367 
368 template <class Allocator>
369 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
370   Allocator *a = new Allocator;
371   a->Init(kReleaseToOSIntervalNever);
372   typename Allocator::AllocatorCache cache;
373   memset(&cache, 0, sizeof(cache));
374   cache.Init(0);
375 
376   uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
377   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
378   // Make sure we correctly compute GetBlockBegin() w/o overflow.
379   for (size_t i = 0; i <= TotalSize / size; i++) {
380     void *x = cache.Allocate(a, max_size_class);
381     void *beg = a->GetBlockBegin(x);
382     // if ((i & (i - 1)) == 0)
383     //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
384     EXPECT_EQ(x, beg);
385   }
386 
387   a->TestOnlyUnmap();
388   delete a;
389 }
390 
391 #if SANITIZER_CAN_USE_ALLOCATOR64
392 // These tests can fail on Windows if memory is somewhat full and lit happens
393 // to run them all at the same time. FIXME: Make them not flaky and reenable.
394 #if !SANITIZER_WINDOWS
395 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
396   SizeClassAllocatorGetBlockBeginStress<Allocator64>(
397       1ULL << (SANITIZER_ANDROID ? 31 : 33));
398 }
399 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
400   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
401       1ULL << (SANITIZER_ANDROID ? 31 : 33));
402 }
403 #if !SANITIZER_ANDROID
404 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
405   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
406 }
407 #endif
408 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
409   // The very compact allocator does not have > 4GB of space per size class.
410   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
411 }
412 TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
413   SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
414 }
415 #endif
416 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
417 
418 struct TestMapUnmapCallback {
419   static int map_count, unmap_count;
420   void OnMap(uptr p, usize size) const { map_count++; }
421   void OnUnmap(uptr p, usize size) const { unmap_count++; }
422 };
423 int TestMapUnmapCallback::map_count;
424 int TestMapUnmapCallback::unmap_count;
425 
426 #if SANITIZER_CAN_USE_ALLOCATOR64
427 // These tests can fail on Windows if memory is somewhat full and lit happens
428 // to run them all at the same time. FIXME: Make them not flaky and reenable.
429 #if !SANITIZER_WINDOWS
430 
431 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
432 struct AP64WithCallback {
433   static const vaddr kSpaceBeg = kAllocatorSpace;
434   static const usize kSpaceSize = kAllocatorSize;
435   static const usize kMetadataSize = 16;
436   typedef ::SizeClassMap SizeClassMap;
437   typedef TestMapUnmapCallback MapUnmapCallback;
438   static const usize kFlags = 0;
439   using AddressSpaceView = AddressSpaceViewTy;
440 };
441 
442 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
443   TestMapUnmapCallback::map_count = 0;
444   TestMapUnmapCallback::unmap_count = 0;
445   typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
446   Allocator64WithCallBack *a = new Allocator64WithCallBack;
447   a->Init(kReleaseToOSIntervalNever);
448   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
449   typename Allocator64WithCallBack::AllocatorCache cache;
450   memset(&cache, 0, sizeof(cache));
451   cache.Init(0);
452   AllocatorStats stats;
453   stats.Init();
454   const size_t kNumChunks = 128;
455   uint32_t chunks[kNumChunks];
456   a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
457   // State + alloc + metadata + freearray.
458   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
459   a->TestOnlyUnmap();
460   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
461   delete a;
462 }
463 #endif
464 #endif
465 
466 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
467 struct AP32WithCallback {
468   static const vaddr kSpaceBeg = 0;
469   static const u64 kSpaceSize = kAddressSpaceSize;
470   static const usize kMetadataSize = 16;
471   typedef CompactSizeClassMap SizeClassMap;
472   static const usize kRegionSizeLog = ::kRegionSizeLog;
473   using AddressSpaceView = AddressSpaceViewTy;
474   typedef TestMapUnmapCallback MapUnmapCallback;
475   static const usize kFlags = 0;
476 };
477 
478 TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
479   TestMapUnmapCallback::map_count = 0;
480   TestMapUnmapCallback::unmap_count = 0;
481   typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
482   Allocator32WithCallBack *a = new Allocator32WithCallBack;
483   a->Init(kReleaseToOSIntervalNever);
484   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
485   Allocator32WithCallBack::AllocatorCache cache;
486   memset(&cache, 0, sizeof(cache));
487   cache.Init(0);
488   AllocatorStats stats;
489   stats.Init();
490   a->AllocateBatch(&stats, &cache, 32);
491   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
492   a->TestOnlyUnmap();
493   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
494   delete a;
495   // fprintf(stderr, "Map: %d Unmap: %d\n",
496   //         TestMapUnmapCallback::map_count,
497   //         TestMapUnmapCallback::unmap_count);
498 }
499 
500 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
501   TestMapUnmapCallback::map_count = 0;
502   TestMapUnmapCallback::unmap_count = 0;
503   LargeMmapAllocator<TestMapUnmapCallback> a;
504   a.Init();
505   AllocatorStats stats;
506   stats.Init();
507   void *x = a.Allocate(&stats, 1 << 20, 1);
508   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
509   a.Deallocate(&stats, x);
510   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
511 }
512 
513 // Don't test OOM conditions on Win64 because it causes other tests on the same
514 // machine to OOM.
515 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
516 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
517   Allocator64 a;
518   a.Init(kReleaseToOSIntervalNever);
519   Allocator64::AllocatorCache cache;
520   memset(&cache, 0, sizeof(cache));
521   cache.Init(0);
522   AllocatorStats stats;
523   stats.Init();
524 
525   const size_t kNumChunks = 128;
526   uint32_t chunks[kNumChunks];
527   bool allocation_failed = false;
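  // Keep pulling large batches of chunks from a single size class; once its
  // region is exhausted, GetFromAllocator() is expected to fail gracefully
  // (return false) rather than crash.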
528   for (int i = 0; i < 1000000; i++) {
529     if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
530       allocation_failed = true;
531       break;
532     }
533   }
534   EXPECT_EQ(allocation_failed, true);
535 
536   a.TestOnlyUnmap();
537 }
538 #endif
539 
540 TEST(SanitizerCommon, LargeMmapAllocator) {
541   LargeMmapAllocator<NoOpMapUnmapCallback> a;
542   a.Init();
543   AllocatorStats stats;
544   stats.Init();
545 
546   static const int kNumAllocs = 1000;
547   char *allocated[kNumAllocs];
548   static const uptr size = 4000;
549   // Allocate some.
550   for (int i = 0; i < kNumAllocs; i++) {
551     allocated[i] = (char *)a.Allocate(&stats, size, 1);
552     CHECK(a.PointerIsMine(allocated[i]));
553   }
554   // Deallocate all.
555   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
556   for (int i = 0; i < kNumAllocs; i++) {
557     char *p = allocated[i];
558     CHECK(a.PointerIsMine(p));
559     a.Deallocate(&stats, p);
560   }
561   // Check that none are left.
562   CHECK_EQ(a.TotalMemoryUsed(), 0);
563 
564   // Allocate some more, also add metadata.
565   for (int i = 0; i < kNumAllocs; i++) {
566     char *x = (char *)a.Allocate(&stats, size, 1);
567     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
568     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
569     *meta = i;
570     allocated[i] = x;
571   }
572   for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
573     char *p = allocated[i % kNumAllocs];
574     CHECK(a.PointerIsMine(p));
575     CHECK(a.PointerIsMine(p + 2000));
576   }
577   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
578   // Deallocate all in reverse order.
579   for (int i = 0; i < kNumAllocs; i++) {
580     int idx = kNumAllocs - i - 1;
581     char *p = allocated[idx];
582     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
583     CHECK_EQ(*meta, idx);
584     CHECK(a.PointerIsMine(p));
585     a.Deallocate(&stats, p);
586   }
587   CHECK_EQ(a.TotalMemoryUsed(), 0);
588 
589   // Test alignments. Test with 256MB alignment on x64 non-Windows machines.
590   // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
591   uptr max_alignment =
592       (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
593   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
594     const uptr kNumAlignedAllocs = 100;
595     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
596       uptr size = ((i % 10) + 1) * 4096;
597       char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
598       CHECK_EQ(p, a.GetBlockBegin(p));
599       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
600       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
601       CHECK_EQ(0, (uptr)allocated[i] % alignment);
602       p[0] = p[size - 1] = 0;
603     }
604     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
605       a.Deallocate(&stats, allocated[i]);
606     }
607   }
608 
609   // Regression test for boundary condition in GetBlockBegin().
610   uptr page_size = GetPageSizeCached();
611   char *p = (char *)a.Allocate(&stats, page_size, 1);
612   CHECK_EQ(p, a.GetBlockBegin(p));
613   CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
614   CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
615   a.Deallocate(&stats, p);
616 }
617 
618 template <class PrimaryAllocator>
619 void TestCombinedAllocator() {
620   typedef CombinedAllocator<PrimaryAllocator> Allocator;
621   Allocator *a = new Allocator;
622   a->Init(kReleaseToOSIntervalNever);
623   std::mt19937 r;
624 
625   typename Allocator::AllocatorCache cache;
626   memset(&cache, 0, sizeof(cache));
627   a->InitCache(&cache);
628 
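  // Huge and overflowing size/alignment requests must fail and return null
  // rather than crash.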
629   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
630   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
631   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
632   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
633   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
634   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
635 
636   const uptr kNumAllocs = 100000;
637   const uptr kNumIter = 10;
638   for (uptr iter = 0; iter < kNumIter; iter++) {
639     std::vector<void*> allocated;
640     for (uptr i = 0; i < kNumAllocs; i++) {
641       uptr size = (i % (1 << 14)) + 1;
642       if ((i % 1024) == 0)
643         size = 1 << (10 + (i % 14));
644       void *x = a->Allocate(&cache, size, 1);
645       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
646       CHECK_EQ(*meta, 0);
647       *meta = size;
648       allocated.push_back(x);
649     }
650 
651     std::shuffle(allocated.begin(), allocated.end(), r);
652 
653     // Test ForEachChunk(...)
654     {
655       std::set<void *> reported_chunks;
656       auto cb = [](uptr chunk, void *arg) {
657         auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
658         auto pair =
659             reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
660         // Check chunk is never reported more than once.
661         ASSERT_TRUE(pair.second);
662       };
663       a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
664       for (const auto &allocated_ptr : allocated) {
665         ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
666       }
667     }
668 
669     for (uptr i = 0; i < kNumAllocs; i++) {
670       void *x = allocated[i];
671       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
672       CHECK_NE(*meta, 0);
673       CHECK(a->PointerIsMine(x));
674       *meta = 0;
675       a->Deallocate(&cache, x);
676     }
677     allocated.clear();
678     a->SwallowCache(&cache);
679   }
680   a->DestroyCache(&cache);
681   a->TestOnlyUnmap();
682 }
683 
684 #if SANITIZER_CAN_USE_ALLOCATOR64
685 TEST(SanitizerCommon, CombinedAllocator64) {
686   TestCombinedAllocator<Allocator64>();
687 }
688 
689 TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
690   TestCombinedAllocator<Allocator64Dynamic>();
691 }
692 
693 #if !SANITIZER_ANDROID
694 TEST(SanitizerCommon, CombinedAllocator64Compact) {
695   TestCombinedAllocator<Allocator64Compact>();
696 }
697 #endif
698 
699 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
700   TestCombinedAllocator<Allocator64VeryCompact>();
701 }
702 #endif
703 
704 TEST(SanitizerCommon, CombinedAllocator32Compact) {
705   TestCombinedAllocator<Allocator32Compact>();
706 }
707 
708 template <class Allocator>
709 void TestSizeClassAllocatorLocalCache() {
710   using AllocatorCache = typename Allocator::AllocatorCache;
711   AllocatorCache cache;
712   Allocator *a = new Allocator();
713 
714   a->Init(kReleaseToOSIntervalNever);
715   memset(&cache, 0, sizeof(cache));
716   cache.Init(0);
717 
718   const uptr kNumAllocs = 10000;
719   const int kNumIter = 100;
720   uptr saved_total = 0;
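  // For each of the first few size classes, repeatedly allocate and deallocate
  // through the cache and drain it; after the first iteration the allocator's
  // memory usage is expected to stay the same.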
721   for (int class_id = 1; class_id <= 5; class_id++) {
722     for (int it = 0; it < kNumIter; it++) {
723       void *allocated[kNumAllocs];
724       for (uptr i = 0; i < kNumAllocs; i++) {
725         allocated[i] = cache.Allocate(a, class_id);
726       }
727       for (uptr i = 0; i < kNumAllocs; i++) {
728         cache.Deallocate(a, class_id, allocated[i]);
729       }
730       cache.Drain(a);
731       uptr total_allocated = a->TotalMemoryUsed();
732       if (it)
733         CHECK_EQ(saved_total, total_allocated);
734       saved_total = total_allocated;
735     }
736   }
737 
738   a->TestOnlyUnmap();
739   delete a;
740 }
741 
742 #if SANITIZER_CAN_USE_ALLOCATOR64
743 // These tests can fail on Windows if memory is somewhat full and lit happens
744 // to run them all at the same time. FIXME: Make them not flaky and reenable.
745 #if !SANITIZER_WINDOWS
746 TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
747   TestSizeClassAllocatorLocalCache<Allocator64>();
748 }
749 
750 TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
751   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
752 }
753 
754 #if !SANITIZER_ANDROID
755 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
756   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
757 }
758 #endif
759 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
760   TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
761 }
762 #endif
763 #endif
764 
765 TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
766   TestSizeClassAllocatorLocalCache<Allocator32Compact>();
767 }
768 
769 #if SANITIZER_CAN_USE_ALLOCATOR64
770 typedef Allocator64::AllocatorCache AllocatorCache;
771 static AllocatorCache static_allocator_cache;
772 
773 void *AllocatorLeakTestWorker(void *arg) {
774   typedef AllocatorCache::Allocator Allocator;
775   Allocator *a = (Allocator*)(arg);
776   static_allocator_cache.Allocate(a, 10);
777   static_allocator_cache.Drain(a);
778   return 0;
779 }
780 
781 TEST(SanitizerCommon, AllocatorLeakTest) {
782   typedef AllocatorCache::Allocator Allocator;
783   Allocator a;
784   a.Init(kReleaseToOSIntervalNever);
785   uptr total_used_memory = 0;
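  // Each worker thread allocates through the static cache and drains it; the
  // allocator's total memory usage is expected to stay the same after the
  // first thread, i.e. no per-thread batches may leak.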
786   for (int i = 0; i < 100; i++) {
787     pthread_t t;
788     PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
789     PTHREAD_JOIN(t, 0);
790     if (i == 0)
791       total_used_memory = a.TotalMemoryUsed();
792     EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
793   }
794 
795   a.TestOnlyUnmap();
796 }
797 
798 // Struct which is allocated to pass info to new threads.  The new thread frees
799 // it.
800 struct NewThreadParams {
801   AllocatorCache *thread_cache;
802   AllocatorCache::Allocator *allocator;
803   uptr class_id;
804 };
805 
806 // Called in a new thread.  Just frees its argument.
807 static void *DeallocNewThreadWorker(void *arg) {
808   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
809   params->thread_cache->Deallocate(params->allocator, params->class_id, params);
810   return NULL;
811 }
812 
813 // The allocator cache is supposed to be POD and zero initialized.  We should be
814 // able to call Deallocate on a zeroed cache, and it will self-initialize.
815 TEST(Allocator, AllocatorCacheDeallocNewThread) {
816   AllocatorCache::Allocator allocator;
817   allocator.Init(kReleaseToOSIntervalNever);
818   AllocatorCache main_cache;
819   AllocatorCache child_cache;
820   memset(&main_cache, 0, sizeof(main_cache));
821   memset(&child_cache, 0, sizeof(child_cache));
822 
823   uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
824   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
825       main_cache.Allocate(&allocator, class_id));
826   params->thread_cache = &child_cache;
827   params->allocator = &allocator;
828   params->class_id = class_id;
829   pthread_t t;
830   PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
831   PTHREAD_JOIN(t, 0);
832 
833   allocator.TestOnlyUnmap();
834 }
835 #endif
836 
837 TEST(Allocator, Basic) {
838   char *p = (char*)InternalAlloc(10);
839   EXPECT_NE(p, (char*)0);
840   char *p2 = (char*)InternalAlloc(20);
841   EXPECT_NE(p2, (char*)0);
842   EXPECT_NE(p2, p);
843   InternalFree(p);
844   InternalFree(p2);
845 }
846 
847 TEST(Allocator, Stress) {
848   const int kCount = 1000;
849   char *ptrs[kCount];
850   unsigned rnd = 42;
851   for (int i = 0; i < kCount; i++) {
852     uptr sz = my_rand_r(&rnd) % 1000;
853     char *p = (char*)InternalAlloc(sz);
854     EXPECT_NE(p, (char*)0);
855     ptrs[i] = p;
856   }
857   for (int i = 0; i < kCount; i++) {
858     InternalFree(ptrs[i]);
859   }
860 }
861 
862 TEST(Allocator, LargeAlloc) {
863   void *p = InternalAlloc(10 << 20);
864   InternalFree(p);
865 }
866 
867 TEST(Allocator, ScopedBuffer) {
868   const int kSize = 512;
869   {
870     InternalMmapVector<int> int_buf(kSize);
871     EXPECT_EQ((uptr)kSize, int_buf.size());
872   }
873   InternalMmapVector<char> char_buf(kSize);
874   EXPECT_EQ((uptr)kSize, char_buf.size());
875   internal_memset(char_buf.data(), 'c', kSize);
876   for (int i = 0; i < kSize; i++) {
877     EXPECT_EQ('c', char_buf[i]);
878   }
879 }
880 
881 void IterationTestCallback(uptr chunk, void *arg) {
882   reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
883 }
884 
885 template <class Allocator>
886 void TestSizeClassAllocatorIteration() {
887   Allocator *a = new Allocator;
888   a->Init(kReleaseToOSIntervalNever);
889   typename Allocator::AllocatorCache cache;
890   memset(&cache, 0, sizeof(cache));
891   cache.Init(0);
892 
893   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
894     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
895 
896   std::vector<void *> allocated;
897 
898   // Allocate a bunch of chunks.
899   for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
900     uptr size = sizes[s];
901     if (!a->CanAllocate(size, 1)) continue;
902     // printf("s = %ld\n", size);
903     uptr n_iter = std::max((uptr)6, 80000 / size);
904     // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
905     for (uptr j = 0; j < n_iter; j++) {
906       uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
907       void *x = cache.Allocate(a, class_id0);
908       allocated.push_back(x);
909     }
910   }
911 
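  // ForEachChunk() requires the allocator to be locked and is expected to
  // report every live chunk.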
912   std::set<uptr> reported_chunks;
913   a->ForceLock();
914   a->ForEachChunk(IterationTestCallback, &reported_chunks);
915   a->ForceUnlock();
916 
917   for (uptr i = 0; i < allocated.size(); i++) {
918     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
919     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
920               reported_chunks.end());
921   }
922 
923   a->TestOnlyUnmap();
924   delete a;
925 }
926 
927 #if SANITIZER_CAN_USE_ALLOCATOR64
928 // These tests can fail on Windows if memory is somewhat full and lit happens
929 // to run them all at the same time. FIXME: Make them not flaky and reenable.
930 #if !SANITIZER_WINDOWS
931 TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
932   TestSizeClassAllocatorIteration<Allocator64>();
933 }
934 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
935   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
936 }
937 #endif
938 #endif
939 
940 TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
941   TestSizeClassAllocatorIteration<Allocator32Compact>();
942 }
943 
944 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
945   LargeMmapAllocator<NoOpMapUnmapCallback> a;
946   a.Init();
947   AllocatorStats stats;
948   stats.Init();
949 
950   static const uptr kNumAllocs = 1000;
951   char *allocated[kNumAllocs];
952   static const uptr size = 40;
953   // Allocate some.
954   for (uptr i = 0; i < kNumAllocs; i++)
955     allocated[i] = (char *)a.Allocate(&stats, size, 1);
956 
957   std::set<uptr> reported_chunks;
958   a.ForceLock();
959   a.ForEachChunk(IterationTestCallback, &reported_chunks);
960   a.ForceUnlock();
961 
962   for (uptr i = 0; i < kNumAllocs; i++) {
963     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
964     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
965               reported_chunks.end());
966   }
967   for (uptr i = 0; i < kNumAllocs; i++)
968     a.Deallocate(&stats, allocated[i]);
969 }
970 
971 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
972   LargeMmapAllocator<NoOpMapUnmapCallback> a;
973   a.Init();
974   AllocatorStats stats;
975   stats.Init();
976 
977   static const uptr kNumAllocs = 1024;
978   static const uptr kNumExpectedFalseLookups = 10000000;
979   char *allocated[kNumAllocs];
980   static const uptr size = 4096;
981   // Allocate some.
982   for (uptr i = 0; i < kNumAllocs; i++) {
983     allocated[i] = (char *)a.Allocate(&stats, size, 1);
984   }
985 
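  // With the allocator locked, GetBlockBeginFastLocked() is expected to map
  // any address inside (or just before) an allocated block back to its
  // beginning, and to return null for addresses that belong to no block.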
986   a.ForceLock();
987   for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
988     // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
989     char *p1 = allocated[i % kNumAllocs];
990     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
991     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
992     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
993     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
994   }
995 
996   for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
997     void *p = reinterpret_cast<void *>(i % 1024);
998     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
999     p = reinterpret_cast<void *>(~0L - (i % 1024));
1000     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1001   }
1002   a.ForceUnlock();
1003 
1004   for (uptr i = 0; i < kNumAllocs; i++)
1005     a.Deallocate(&stats, allocated[i]);
1006 }
1007 
1008 
1009 // Don't test OOM conditions on Win64 because it causes other tests on the same
1010 // machine to OOM.
1011 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
1012 typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap;
1013 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
1014 struct AP64_SpecialSizeClassMap {
1015   static const vaddr kSpaceBeg = kAllocatorSpace;
1016   static const usize kSpaceSize = kAllocatorSize;
1017   static const usize kMetadataSize = 0;
1018   typedef SpecialSizeClassMap SizeClassMap;
1019   typedef NoOpMapUnmapCallback MapUnmapCallback;
1020   static const usize kFlags = 0;
1021   using AddressSpaceView = AddressSpaceViewTy;
1022 };
1023 
1024 // Regression test for out-of-memory condition in PopulateFreeList().
1025 TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
1026   // In a world where regions are small and chunks are huge...
1027   typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
1028   const uptr kRegionSize =
1029       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
1030   SpecialAllocator64 *a = new SpecialAllocator64;
1031   a->Init(kReleaseToOSIntervalNever);
1032   SpecialAllocator64::AllocatorCache cache;
1033   memset(&cache, 0, sizeof(cache));
1034   cache.Init(0);
1035 
1036   // ...one man is on a mission to overflow a region with a series of
1037   // successive allocations.
1038 
1039   const uptr kClassID = 107;
1040   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1041   ASSERT_LT(2 * kAllocationSize, kRegionSize);
1042   ASSERT_GT(3 * kAllocationSize, kRegionSize);
1043   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1044   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1045   EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1046 
1047   const uptr Class2 = 100;
1048   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1049   ASSERT_EQ(Size2 * 8, kRegionSize);
1050   char *p[7];
1051   for (int i = 0; i < 7; i++) {
1052     p[i] = (char*)cache.Allocate(a, Class2);
1053     EXPECT_NE(p[i], nullptr);
1054     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1055     p[i][Size2 - 1] = 42;
1056     if (i) ASSERT_LT(p[i - 1], p[i]);
1057   }
1058   EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
1059   cache.Deallocate(a, Class2, p[0]);
1060   cache.Drain(a);
1061   ASSERT_EQ(p[6][Size2 - 1], 42);
1062   a->TestOnlyUnmap();
1063   delete a;
1064 }
1065 
1066 #endif
1067 
1068 #if SANITIZER_CAN_USE_ALLOCATOR64
1069 
1070 class NoMemoryMapper {
1071  public:
1072   uptr last_request_buffer_size;
1073 
1074   NoMemoryMapper() : last_request_buffer_size(0) {}
1075 
1076   uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
1077     last_request_buffer_size = buffer_size;
1078     return 0;
1079   }
1080   void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
1081 };
1082 
1083 class RedZoneMemoryMapper {
1084  public:
1085   RedZoneMemoryMapper() {
1086     const auto page_size = GetPageSize();
1087     buffer = MmapOrDie(3ULL * page_size, "");
1088     MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
1089     MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1090   }
1091   ~RedZoneMemoryMapper() {
1092     UnmapOrDie(buffer, 3 * GetPageSize());
1093   }
1094 
1095   uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
1096     const auto page_size = GetPageSize();
1097     CHECK_EQ(buffer_size, page_size);
1098     memset(reinterpret_cast<void*>(reinterpret_cast<uptr>(buffer) + page_size),
1099            0, page_size);
1100     return reinterpret_cast<uptr>(buffer) + page_size;
1101   }
1102   void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
1103 
1104  private:
1105   void *buffer;
1106 };
1107 
1108 TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
1109   NoMemoryMapper no_memory_mapper;
1110   typedef Allocator64::PackedCounterArray<NoMemoryMapper>
1111       NoMemoryPackedCounterArray;
1112 
1113   for (int i = 0; i < 64; i++) {
1114     // Various valid counter max values packed into one word.
1115     NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
1116     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1117 
1118     // Check the "all bits set" values too.
1119     NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
1120     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1121 
1122     // Verify the packing ratio: each counter is expected to be packed into the
1123     // closest power-of-2 number of bits.
1124     NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
1125     EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1126               no_memory_mapper.last_request_buffer_size);
1127   }
1128 
1129   RedZoneMemoryMapper memory_mapper;
1130   typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
1131       RedZonePackedCounterArray;
1132   // Go through 1, 2, 4, 8, .. 64 bits per counter.
1133   for (int i = 0; i < 7; i++) {
1134     // Make sure counters request one memory page for the buffer.
1135     const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
1136     RedZonePackedCounterArray counters(kNumCounters,
1137                                        1ULL << ((1 << i) - 1),
1138                                        &memory_mapper);
1139     counters.Inc(0);
1140     for (u64 c = 1; c < kNumCounters - 1; c++) {
1141       ASSERT_EQ(0ULL, counters.Get(c));
1142       counters.Inc(c);
1143       ASSERT_EQ(1ULL, counters.Get(c - 1));
1144     }
1145     ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
1146     counters.Inc(kNumCounters - 1);
1147 
1148     if (i > 0) {
1149       counters.IncRange(0, kNumCounters - 1);
1150       for (u64 c = 0; c < kNumCounters; c++)
1151         ASSERT_EQ(2ULL, counters.Get(c));
1152     }
1153   }
1154 }
1155 
1156 class RangeRecorder {
1157  public:
1158   std::string reported_pages;
1159 
1160   RangeRecorder()
1161       : page_size_scaled_log(
1162             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1163         last_page_reported(0) {}
1164 
1165   void ReleasePageRangeToOS(u32 from, u32 to) {
1166     from >>= page_size_scaled_log;
1167     to >>= page_size_scaled_log;
1168     ASSERT_LT(from, to);
1169     if (!reported_pages.empty())
1170       ASSERT_LT(last_page_reported, from);
1171     reported_pages.append(from - last_page_reported, '.');
1172     reported_pages.append(to - from, 'x');
1173     last_page_reported = to;
1174   }
1175  private:
1176   const uptr page_size_scaled_log;
1177   u32 last_page_reported;
1178 };
1179 
1180 TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
1181   typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
1182 
1183   // 'x' denotes a page to be released, '.' denotes a page to be kept around.
1184   const char* test_cases[] = {
1185       "",
1186       ".",
1187       "x",
1188       "........",
1189       "xxxxxxxxxxx",
1190       "..............xxxxx",
1191       "xxxxxxxxxxxxxxxxxx.....",
1192       "......xxxxxxxx........",
1193       "xxx..........xxxxxxxxxxxxxxx",
1194       "......xxxx....xxxx........",
1195       "xxx..........xxxxxxxx....xxxxxxx",
1196       "x.x.x.x.x.x.x.x.x.x.x.x.",
1197       ".x.x.x.x.x.x.x.x.x.x.x.x",
1198       ".x.x.x.x.x.x.x.x.x.x.x.x.",
1199       "x.x.x.x.x.x.x.x.x.x.x.x.x",
1200   };
1201 
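  // Feed each pattern to the tracker one page at a time; the recorder is
  // expected to be asked to release exactly the runs of 'x' pages.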
1202   for (auto test_case : test_cases) {
1203     RangeRecorder range_recorder;
1204     RangeTracker tracker(&range_recorder);
1205     for (int i = 0; test_case[i] != 0; i++)
1206       tracker.NextPage(test_case[i] == 'x');
1207     tracker.Done();
1208     // Strip trailing '.'-pages before comparing the results as they are not
1209     // going to be reported to range_recorder anyway.
1210     const char* last_x = strrchr(test_case, 'x');
1211     std::string expected(
1212         test_case,
1213         last_x == nullptr ? 0 : (last_x - test_case + 1));
1214     EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
1215   }
1216 }
1217 
1218 class ReleasedPagesTrackingMemoryMapper {
1219  public:
1220   std::set<u32> reported_pages;
1221 
1222   uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
1223     reported_pages.clear();
1224     return reinterpret_cast<uptr>(calloc(1, buffer_size));
1225   }
1226   void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
1227     free(reinterpret_cast<void*>(buffer));
1228   }
1229 
1230   void ReleasePageRangeToOS(u32 from, u32 to) {
1231     uptr page_size_scaled =
1232         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
1233     for (u32 i = from; i < to; i += page_size_scaled)
1234       reported_pages.insert(i);
1235   }
1236 };
1237 
1238 template <class Allocator>
1239 void TestReleaseFreeMemoryToOS() {
1240   ReleasedPagesTrackingMemoryMapper memory_mapper;
1241   const uptr kAllocatedPagesCount = 1024;
1242   const uptr page_size = GetPageSizeCached();
1243   const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
1244   std::mt19937 r;
1245   uint32_t rnd_state = 42;
1246 
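  // For every size class, build a random free list, release the free memory,
  // and then cross-check the mock mapper's records against the free list.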
1247   for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
1248       class_id++) {
1249     const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
1250     const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
1251     const uptr max_chunks =
1252         kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
1253 
1254     // Generate the random free list.
1255     std::vector<u32> free_array;
1256     bool in_free_range = false;
1257     uptr current_range_end = 0;
1258     for (uptr i = 0; i < max_chunks; i++) {
1259       if (i == current_range_end) {
1260         in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
1261         current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1262       }
1263       if (in_free_range)
1264         free_array.push_back(i * chunk_size_scaled);
1265     }
1266     if (free_array.empty())
1267       continue;
1268     // Shuffle the free array to verify that ReleaseFreeMemoryToOS does not
1269     // depend on the order of the free list.
1270     std::shuffle(free_array.begin(), free_array.end(), r);
1271 
1272     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
1273                                      chunk_size, kAllocatedPagesCount,
1274                                      &memory_mapper);
1275 
1276     // Verify that no released page is touched by a used chunk and that every
1277     // range of free chunks large enough to contain entire memory pages had
1278     // those pages released.
1279     uptr verified_released_pages = 0;
1280     std::set<u32> free_chunks(free_array.begin(), free_array.end());
1281 
1282     u32 current_chunk = 0;
1283     in_free_range = false;
1284     u32 current_free_range_start = 0;
1285     for (uptr i = 0; i <= max_chunks; i++) {
1286       bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
1287 
1288       if (is_free_chunk) {
1289         if (!in_free_range) {
1290           in_free_range = true;
1291           current_free_range_start = current_chunk;
1292         }
1293       } else {
1294         // Verify that this used chunk does not touch any released page.
1295         for (uptr i_page = current_chunk / page_size_scaled;
1296              i_page <= (current_chunk + chunk_size_scaled - 1) /
1297                        page_size_scaled;
1298              i_page++) {
1299           bool page_released =
1300               memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
1301               memory_mapper.reported_pages.end();
1302           ASSERT_EQ(false, page_released);
1303         }
1304 
1305         if (in_free_range) {
1306           in_free_range = false;
1307           // Verify that all entire memory pages covered by this range of free
1308           // chunks were released.
1309           u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1310           while (page + page_size_scaled <= current_chunk) {
1311             bool page_released =
1312                 memory_mapper.reported_pages.find(page) !=
1313                 memory_mapper.reported_pages.end();
1314             ASSERT_EQ(true, page_released);
1315             verified_released_pages++;
1316             page += page_size_scaled;
1317           }
1318         }
1319       }
1320 
1321       current_chunk += chunk_size_scaled;
1322     }
1323 
1324     ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
1325   }
1326 }
1327 
1328 TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
1329   TestReleaseFreeMemoryToOS<Allocator64>();
1330 }
1331 
1332 #if !SANITIZER_ANDROID
1333 TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
1334   TestReleaseFreeMemoryToOS<Allocator64Compact>();
1335 }
1336 
1337 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
1338   TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
1339 }
1340 #endif  // !SANITIZER_ANDROID
1341 
1342 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
1343 
1344 TEST(SanitizerCommon, TwoLevelByteMap) {
1345   const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
1346   const u64 n = kSize1 * kSize2;
1347   TwoLevelByteMap<kSize1, kSize2> m;
1348   m.Init();
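  // Set every 7th index and verify that untouched indices still read as 0
  // while the written ones keep their values.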
1349   for (u64 i = 0; i < n; i += 7) {
1350     m.set(i, (i % 100) + 1);
1351   }
1352   for (u64 j = 0; j < n; j++) {
1353     if (j % 7)
1354       EXPECT_EQ(m[j], 0);
1355     else
1356       EXPECT_EQ(m[j], (j % 100) + 1);
1357   }
1358 
1359   m.TestOnlyUnmap();
1360 }
1361 
1362 template <typename AddressSpaceView>
1363 using TestByteMapASVT =
1364     TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
1365 using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;
1366 
1367 struct TestByteMapParam {
1368   TestByteMap *m;
1369   size_t shard;
1370   size_t num_shards;
1371 };
1372 
1373 void *TwoLevelByteMapUserThread(void *param) {
1374   TestByteMapParam *p = (TestByteMapParam*)param;
1375   for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
1376     size_t val = (i % 100) + 1;
1377     p->m->set(i, val);
1378     EXPECT_EQ((*p->m)[i], val);
1379   }
1380   return 0;
1381 }
1382 
1383 TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
1384   TestByteMap m;
1385   m.Init();
1386   TestMapUnmapCallback::map_count = 0;
1387   TestMapUnmapCallback::unmap_count = 0;
1388   static const int kNumThreads = 4;
1389   pthread_t t[kNumThreads];
1390   TestByteMapParam p[kNumThreads];
1391   for (int i = 0; i < kNumThreads; i++) {
1392     p[i].m = &m;
1393     p[i].shard = i;
1394     p[i].num_shards = kNumThreads;
1395     PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
1396   }
1397   for (int i = 0; i < kNumThreads; i++) {
1398     PTHREAD_JOIN(t[i], 0);
1399   }
1400   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1401   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
1402   m.TestOnlyUnmap();
1403   EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
1404   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
1405 }
1406 
1407 TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
1408   // When allocating a memory block slightly bigger than a memory page and
1409   // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
1410   // the size up to the page size, so that subsequent calls to the allocator
1411   // can use the remaining space in the last allocated page.
1412   static LowLevelAllocator allocator;
1413   char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
1414   char *ptr2 = (char *)allocator.Allocate(16);
1415   EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
1416 }
1417 
1418 #endif  // #if !SANITIZER_DEBUG
1419