1 //===-- sanitizer_allocator_test.cpp --------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 // Tests for sanitizer_allocator.h.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator.h"
14 #include "sanitizer_common/sanitizer_allocator_internal.h"
15 #include "sanitizer_common/sanitizer_common.h"
16 
17 #include "sanitizer_test_utils.h"
18 #include "sanitizer_pthread_wrappers.h"
19 
20 #include "gtest/gtest.h"
21 
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <algorithm>
25 #include <vector>
26 #include <random>
27 #include <set>
28 
29 using namespace __sanitizer;
30 
31 #if SANITIZER_SOLARIS && defined(__sparcv9)
32 // FIXME: These tests probably fail because Solaris/sparcv9 uses the full
33 // 64-bit address space.  Needs more investigation
34 #define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x
35 #else
36 #define SKIP_ON_SOLARIS_SPARCV9(x) x
37 #endif
38 
39 // On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't
40 // use size class maps with a large number of classes, as that will make the
41 // SizeClassAllocator64 region size too small (< 2^32).
42 #if SANITIZER_ANDROID && defined(__aarch64__)
43 #define ALLOCATOR64_SMALL_SIZE 1
44 #elif SANITIZER_RISCV64
45 #define ALLOCATOR64_SMALL_SIZE 1
46 #else
47 #define ALLOCATOR64_SMALL_SIZE 0
48 #endif
49 
50 // Too slow for debug build
51 #if !SANITIZER_DEBUG
52 
53 #if SANITIZER_CAN_USE_ALLOCATOR64
54 #if SANITIZER_WINDOWS
55 // On Windows 64-bit there is no easy way to find a large enough fixed address
56 // space that is always available. Thus, a dynamically allocated address space
57 // is used instead (i.e. ~(uptr)0).
58 static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G
60 static const u64 kAddressSpaceSize = 1ULL << 47;
61 typedef DefaultSizeClassMap SizeClassMap;
62 #elif SANITIZER_ANDROID && defined(__aarch64__)
63 static const uptr kAllocatorSpace = 0x3000000000ULL;
64 static const uptr kAllocatorSize  = 0x2000000000ULL;
65 static const u64 kAddressSpaceSize = 1ULL << 39;
66 typedef VeryCompactSizeClassMap SizeClassMap;
67 #elif SANITIZER_RISCV64
68 const uptr kAllocatorSpace = ~(uptr)0;
69 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
70 static const u64 kAddressSpaceSize = 1ULL << 38;
71 typedef VeryDenseSizeClassMap SizeClassMap;
72 #else
73 static const uptr kAllocatorSpace = 0x700000000000ULL;
74 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
75 static const u64 kAddressSpaceSize = 1ULL << 47;
76 typedef DefaultSizeClassMap SizeClassMap;
77 #endif
78 
79 template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
81   static const uptr kSpaceBeg = kAllocatorSpace;
82   static const uptr kSpaceSize = kAllocatorSize;
83   static const uptr kMetadataSize = 16;
84   typedef ::SizeClassMap SizeClassMap;
85   typedef NoOpMapUnmapCallback MapUnmapCallback;
86   static const uptr kFlags = 0;
87   using AddressSpaceView = AddressSpaceViewTy;
88 };
89 
90 template <typename AddressSpaceViewTy>
91 struct AP64Dyn {
92   static const uptr kSpaceBeg = ~(uptr)0;
93   static const uptr kSpaceSize = kAllocatorSize;
94   static const uptr kMetadataSize = 16;
95   typedef ::SizeClassMap SizeClassMap;
96   typedef NoOpMapUnmapCallback MapUnmapCallback;
97   static const uptr kFlags = 0;
98   using AddressSpaceView = AddressSpaceViewTy;
99 };
100 
101 template <typename AddressSpaceViewTy>
102 struct AP64Compact {
103   static const uptr kSpaceBeg = ~(uptr)0;
104   static const uptr kSpaceSize = kAllocatorSize;
105   static const uptr kMetadataSize = 16;
106   typedef CompactSizeClassMap SizeClassMap;
107   typedef NoOpMapUnmapCallback MapUnmapCallback;
108   static const uptr kFlags = 0;
109   using AddressSpaceView = AddressSpaceViewTy;
110 };
111 
112 template <typename AddressSpaceViewTy>
113 struct AP64VeryCompact {
114   static const uptr kSpaceBeg = ~(uptr)0;
115   static const uptr kSpaceSize = 1ULL << 37;
116   static const uptr kMetadataSize = 16;
117   typedef VeryCompactSizeClassMap SizeClassMap;
118   typedef NoOpMapUnmapCallback MapUnmapCallback;
119   static const uptr kFlags = 0;
120   using AddressSpaceView = AddressSpaceViewTy;
121 };
122 
123 template <typename AddressSpaceViewTy>
124 struct AP64Dense {
125   static const uptr kSpaceBeg = kAllocatorSpace;
126   static const uptr kSpaceSize = kAllocatorSize;
127   static const uptr kMetadataSize = 16;
128   typedef DenseSizeClassMap SizeClassMap;
129   typedef NoOpMapUnmapCallback MapUnmapCallback;
130   static const uptr kFlags = 0;
131   using AddressSpaceView = AddressSpaceViewTy;
132 };
133 
134 template <typename AddressSpaceView>
135 using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
136 using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
137 
138 template <typename AddressSpaceView>
139 using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
140 using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
141 
142 template <typename AddressSpaceView>
143 using Allocator64CompactASVT =
144     SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
145 using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
146 
147 template <typename AddressSpaceView>
148 using Allocator64VeryCompactASVT =
149     SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
150 using Allocator64VeryCompact =
151     Allocator64VeryCompactASVT<LocalAddressSpaceView>;
152 
153 template <typename AddressSpaceView>
154 using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
155 using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
156 
157 #elif defined(__mips64)
158 static const u64 kAddressSpaceSize = 1ULL << 40;
159 #elif defined(__aarch64__)
160 static const u64 kAddressSpaceSize = 1ULL << 39;
161 #elif defined(__s390x__)
162 static const u64 kAddressSpaceSize = 1ULL << 53;
163 #elif defined(__s390__)
164 static const u64 kAddressSpaceSize = 1ULL << 31;
165 #else
166 static const u64 kAddressSpaceSize = 1ULL << 32;
167 #endif
168 
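// Region size used by the SizeClassAllocator32 tests below:
// 2^20 (1M) on 32-bit hosts, 2^24 (16M) on 64-bit hosts.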
169 static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
170 
171 template <typename AddressSpaceViewTy>
172 struct AP32Compact {
173   static const uptr kSpaceBeg = 0;
174   static const u64 kSpaceSize = kAddressSpaceSize;
175   static const uptr kMetadataSize = 16;
176   typedef CompactSizeClassMap SizeClassMap;
177   static const uptr kRegionSizeLog = ::kRegionSizeLog;
178   using AddressSpaceView = AddressSpaceViewTy;
179   typedef NoOpMapUnmapCallback MapUnmapCallback;
180   static const uptr kFlags = 0;
181 };
182 template <typename AddressSpaceView>
183 using Allocator32CompactASVT =
184     SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
185 using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
186 
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Print();
  SCMap::Validate();
}
193 
TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}
209 
TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}
213 
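// Generic smoke test for a size class allocator: allocates chunks of many
// sizes, checks block boundaries, metadata and size classes, then verifies
// that total memory usage stays stable across allocate/deallocate rounds.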
template <class Allocator>
void TestSizeClassAllocator(uptr premapped_heap = 0) {
216   Allocator *a = new Allocator;
217   a->Init(kReleaseToOSIntervalNever, premapped_heap);
218   typename Allocator::AllocatorCache cache;
219   memset(&cache, 0, sizeof(cache));
220   cache.Init(0);
221 
222   static const uptr sizes[] = {
223     1, 16,  30, 40, 100, 1000, 10000,
224     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
225   };
226 
227   std::vector<void *> allocated;
228 
229   uptr last_total_allocated = 0;
230   for (int i = 0; i < 3; i++) {
231     // Allocate a bunch of chunks.
232     for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
233       uptr size = sizes[s];
234       if (!a->CanAllocate(size, 1)) continue;
235       // printf("s = %ld\n", size);
236       uptr n_iter = std::max((uptr)6, 4000000 / size);
237       // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
238       for (uptr i = 0; i < n_iter; i++) {
239         uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
240         char *x = (char*)cache.Allocate(a, class_id0);
241         x[0] = 0;
242         x[size - 1] = 0;
243         x[size / 2] = 0;
244         allocated.push_back(x);
245         CHECK_EQ(x, a->GetBlockBegin(x));
246         CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
247         CHECK(a->PointerIsMine(x));
248         CHECK(a->PointerIsMine(x + size - 1));
249         CHECK(a->PointerIsMine(x + size / 2));
250         CHECK_GE(a->GetActuallyAllocatedSize(x), size);
251         uptr class_id = a->GetSizeClass(x);
252         CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
253         uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
254         metadata[0] = reinterpret_cast<uptr>(x) + 1;
255         metadata[1] = 0xABCD;
256       }
257     }
258     // Deallocate all.
259     for (uptr i = 0; i < allocated.size(); i++) {
260       void *x = allocated[i];
261       uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
262       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
263       CHECK_EQ(metadata[1], 0xABCD);
264       cache.Deallocate(a, a->GetSizeClass(x), x);
265     }
266     allocated.clear();
267     uptr total_allocated = a->TotalMemoryUsed();
268     if (last_total_allocated == 0)
269       last_total_allocated = total_allocated;
270     CHECK_EQ(last_total_allocated, total_allocated);
271   }
272 
273   // Check that GetBlockBegin never crashes.
274   for (uptr x = 0, step = kAddressSpaceSize / 100000;
275        x < kAddressSpaceSize - step; x += step)
276     if (a->PointerIsMine(reinterpret_cast<void *>(x)))
277       Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));
278 
279   a->TestOnlyUnmap();
280   delete a;
281 }
282 
283 #if SANITIZER_CAN_USE_ALLOCATOR64
284 
// Maps 2 * kAllocatorSize bytes on construction, so that a kAllocatorSize-
// aligned region of kAllocatorSize bytes is guaranteed to fit inside, and
// unmaps the whole mapping on destruction.
class ScopedPremappedHeap {
 public:
  ScopedPremappedHeap() {
    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
  }

  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }

  uptr Addr() { return AlignedAddr; }

 private:
  void *BasePtr;
  uptr AlignedAddr;
};
302 
303 // These tests can fail on Windows if memory is somewhat full and lit happens
304 // to run them all at the same time. FIXME: Make them not flaky and reenable.
305 #if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !ALLOCATOR64_SMALL_SIZE
// Android only has 39-bit address space, so mapping 2 * kAllocatorSize
// sometimes fails.
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
  ScopedPremappedHeap h;
  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}
340 
341 template <typename AddressSpaceViewTy>
342 struct AP32SeparateBatches {
343   static const uptr kSpaceBeg = 0;
344   static const u64 kSpaceSize = kAddressSpaceSize;
345   static const uptr kMetadataSize = 16;
346   typedef DefaultSizeClassMap SizeClassMap;
347   static const uptr kRegionSizeLog = ::kRegionSizeLog;
348   using AddressSpaceView = AddressSpaceViewTy;
349   typedef NoOpMapUnmapCallback MapUnmapCallback;
350   static const uptr kFlags =
351       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
352 };
353 template <typename AddressSpaceView>
354 using Allocator32SeparateBatchesASVT =
355     SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
356 using Allocator32SeparateBatches =
357     Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
358 
TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}
362 
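// Checks that GetMetaData() returns a stable pointer for a live chunk no
// matter how many times it is queried.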
template <class Allocator>
void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
365   Allocator *a = new Allocator;
366   a->Init(kReleaseToOSIntervalNever, premapped_heap);
367   typename Allocator::AllocatorCache cache;
368   memset(&cache, 0, sizeof(cache));
369   cache.Init(0);
370 
371   const uptr kNumAllocs = 1 << 13;
372   void *allocated[kNumAllocs];
373   void *meta[kNumAllocs];
374   for (uptr i = 0; i < kNumAllocs; i++) {
375     void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
376     allocated[i] = x;
377     meta[i] = a->GetMetaData(x);
378   }
379   // Get Metadata kNumAllocs^2 times.
380   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
381     uptr idx = i % kNumAllocs;
382     void *m = a->GetMetaData(allocated[idx]);
383     EXPECT_EQ(m, meta[idx]);
384   }
385   for (uptr i = 0; i < kNumAllocs; i++) {
386     cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
387   }
388 
389   a->TestOnlyUnmap();
390   delete a;
391 }
392 
393 #if SANITIZER_CAN_USE_ALLOCATOR64
394 // These tests can fail on Windows if memory is somewhat full and lit happens
395 // to run them all at the same time. FIXME: Make them not flaky and reenable.
396 #if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
  ScopedPremappedHeap h;
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
421 
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
                                           uptr premapped_heap = 0) {
425   Allocator *a = new Allocator;
426   a->Init(kReleaseToOSIntervalNever, premapped_heap);
427   typename Allocator::AllocatorCache cache;
428   memset(&cache, 0, sizeof(cache));
429   cache.Init(0);
430 
431   uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
432   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
433   // Make sure we correctly compute GetBlockBegin() w/o overflow.
434   for (size_t i = 0; i <= TotalSize / size; i++) {
435     void *x = cache.Allocate(a, max_size_class);
436     void *beg = a->GetBlockBegin(x);
437     // if ((i & (i - 1)) == 0)
438     //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
439     EXPECT_EQ(x, beg);
440   }
441 
442   a->TestOnlyUnmap();
443   delete a;
444 }
445 
446 #if SANITIZER_CAN_USE_ALLOCATOR64
447 // These tests can fail on Windows if memory is somewhat full and lit happens
448 // to run them all at the same time. FIXME: Make them not flaky and reenable.
449 #if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
  ScopedPremappedHeap h;
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4Gb for each class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
475 #endif
476 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
477 
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
483 int TestMapUnmapCallback::map_count;
484 int TestMapUnmapCallback::unmap_count;
485 
486 #if SANITIZER_CAN_USE_ALLOCATOR64
487 // These tests can fail on Windows if memory is somewhat full and lit happens
488 // to run them all at the same time. FIXME: Make them not flaky and reenable.
489 #if !SANITIZER_WINDOWS
490 
491 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
492 struct AP64WithCallback {
493   static const uptr kSpaceBeg = kAllocatorSpace;
494   static const uptr kSpaceSize = kAllocatorSize;
495   static const uptr kMetadataSize = 16;
496   typedef ::SizeClassMap SizeClassMap;
497   typedef TestMapUnmapCallback MapUnmapCallback;
498   static const uptr kFlags = 0;
499   using AddressSpaceView = AddressSpaceViewTy;
500 };
501 
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
503   TestMapUnmapCallback::map_count = 0;
504   TestMapUnmapCallback::unmap_count = 0;
505   typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
506   Allocator64WithCallBack *a = new Allocator64WithCallBack;
507   a->Init(kReleaseToOSIntervalNever);
508   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
509   typename Allocator64WithCallBack::AllocatorCache cache;
510   memset(&cache, 0, sizeof(cache));
511   cache.Init(0);
512   AllocatorStats stats;
513   stats.Init();
514   const size_t kNumChunks = 128;
515   uint32_t chunks[kNumChunks];
516   a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
517   // State + alloc + metadata + freearray.
518   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
519   a->TestOnlyUnmap();
520   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
521   delete a;
522 }
523 #endif
524 #endif
525 
526 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
527 struct AP32WithCallback {
528   static const uptr kSpaceBeg = 0;
529   static const u64 kSpaceSize = kAddressSpaceSize;
530   static const uptr kMetadataSize = 16;
531   typedef CompactSizeClassMap SizeClassMap;
532   static const uptr kRegionSizeLog = ::kRegionSizeLog;
533   using AddressSpaceView = AddressSpaceViewTy;
534   typedef TestMapUnmapCallback MapUnmapCallback;
535   static const uptr kFlags = 0;
536 };
537 
TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
539   TestMapUnmapCallback::map_count = 0;
540   TestMapUnmapCallback::unmap_count = 0;
541   typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
542   Allocator32WithCallBack *a = new Allocator32WithCallBack;
543   a->Init(kReleaseToOSIntervalNever);
544   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
545   Allocator32WithCallBack::AllocatorCache cache;
546   memset(&cache, 0, sizeof(cache));
547   cache.Init(0);
548   AllocatorStats stats;
549   stats.Init();
550   a->AllocateBatch(&stats, &cache, 32);
551   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
552   a->TestOnlyUnmap();
553   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
554   delete a;
555   // fprintf(stderr, "Map: %d Unmap: %d\n",
556   //         TestMapUnmapCallback::map_count,
557   //         TestMapUnmapCallback::unmap_count);
558 }
559 
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
561   TestMapUnmapCallback::map_count = 0;
562   TestMapUnmapCallback::unmap_count = 0;
563   LargeMmapAllocator<TestMapUnmapCallback> a;
564   a.Init();
565   AllocatorStats stats;
566   stats.Init();
567   void *x = a.Allocate(&stats, 1 << 20, 1);
568   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
569   a.Deallocate(&stats, x);
570   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
571 }
572 
573 // Don't test OOM conditions on Win64 because it causes other tests on the same
574 // machine to OOM.
575 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
577   Allocator64 a;
578   a.Init(kReleaseToOSIntervalNever);
579   Allocator64::AllocatorCache cache;
580   memset(&cache, 0, sizeof(cache));
581   cache.Init(0);
582   AllocatorStats stats;
583   stats.Init();
584 
585   const size_t kNumChunks = 128;
586   uint32_t chunks[kNumChunks];
587   bool allocation_failed = false;
588   for (int i = 0; i < 1000000; i++) {
589     uptr class_id = a.kNumClasses - 1;
590     if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
591       allocation_failed = true;
592       break;
593     }
594   }
595   EXPECT_EQ(allocation_failed, true);
596 
597   a.TestOnlyUnmap();
598 }
599 #endif
600 
TEST(SanitizerCommon, LargeMmapAllocator) {
602   LargeMmapAllocator<NoOpMapUnmapCallback> a;
603   a.Init();
604   AllocatorStats stats;
605   stats.Init();
606 
607   static const int kNumAllocs = 1000;
608   char *allocated[kNumAllocs];
609   static const uptr size = 4000;
610   // Allocate some.
611   for (int i = 0; i < kNumAllocs; i++) {
612     allocated[i] = (char *)a.Allocate(&stats, size, 1);
613     CHECK(a.PointerIsMine(allocated[i]));
614   }
615   // Deallocate all.
616   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
617   for (int i = 0; i < kNumAllocs; i++) {
618     char *p = allocated[i];
619     CHECK(a.PointerIsMine(p));
620     a.Deallocate(&stats, p);
621   }
  // Check that none are left.
623   CHECK_EQ(a.TotalMemoryUsed(), 0);
624 
625   // Allocate some more, also add metadata.
626   for (int i = 0; i < kNumAllocs; i++) {
627     char *x = (char *)a.Allocate(&stats, size, 1);
628     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
629     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
630     *meta = i;
631     allocated[i] = x;
632   }
633   for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
634     char *p = allocated[i % kNumAllocs];
635     CHECK(a.PointerIsMine(p));
636     CHECK(a.PointerIsMine(p + 2000));
637   }
638   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
639   // Deallocate all in reverse order.
640   for (int i = 0; i < kNumAllocs; i++) {
641     int idx = kNumAllocs - i - 1;
642     char *p = allocated[idx];
643     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
644     CHECK_EQ(*meta, idx);
645     CHECK(a.PointerIsMine(p));
646     a.Deallocate(&stats, p);
647   }
648   CHECK_EQ(a.TotalMemoryUsed(), 0);
649 
  // Test alignments. Test with 256MB alignment on x64 non-Windows machines.
  // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
  uptr max_alignment =
      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
654   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
655     const uptr kNumAlignedAllocs = 100;
656     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
657       uptr size = ((i % 10) + 1) * 4096;
658       char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
659       CHECK_EQ(p, a.GetBlockBegin(p));
660       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
661       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
662       CHECK_EQ(0, (uptr)allocated[i] % alignment);
663       p[0] = p[size - 1] = 0;
664     }
665     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
666       a.Deallocate(&stats, allocated[i]);
667     }
668   }
669 
670   // Regression test for boundary condition in GetBlockBegin().
671   uptr page_size = GetPageSizeCached();
672   char *p = (char *)a.Allocate(&stats, page_size, 1);
673   CHECK_EQ(p, a.GetBlockBegin(p));
674   CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
675   CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
676   a.Deallocate(&stats, p);
677 }
678 
template <class PrimaryAllocator>
void TestCombinedAllocator(uptr premapped_heap = 0) {
681   typedef CombinedAllocator<PrimaryAllocator> Allocator;
682   Allocator *a = new Allocator;
683   a->Init(kReleaseToOSIntervalNever, premapped_heap);
684   std::mt19937 r;
685 
686   typename Allocator::AllocatorCache cache;
687   memset(&cache, 0, sizeof(cache));
688   a->InitCache(&cache);
689 
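  // Requests with absurd sizes must fail gracefully and return null rather
  // than crash or wrap around.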
690   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
691   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
692   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
693   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
694   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
695   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
696 
697   const uptr kNumAllocs = 100000;
698   const uptr kNumIter = 10;
699   for (uptr iter = 0; iter < kNumIter; iter++) {
700     std::vector<void*> allocated;
701     for (uptr i = 0; i < kNumAllocs; i++) {
702       uptr size = (i % (1 << 14)) + 1;
703       if ((i % 1024) == 0)
704         size = 1 << (10 + (i % 14));
705       void *x = a->Allocate(&cache, size, 1);
706       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
707       CHECK_EQ(*meta, 0);
708       *meta = size;
709       allocated.push_back(x);
710     }
711 
712     std::shuffle(allocated.begin(), allocated.end(), r);
713 
714     // Test ForEachChunk(...)
715     {
716       std::set<void *> reported_chunks;
717       auto cb = [](uptr chunk, void *arg) {
718         auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
719         auto pair =
720             reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
721         // Check chunk is never reported more than once.
722         ASSERT_TRUE(pair.second);
723       };
724       a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
725       for (const auto &allocated_ptr : allocated) {
726         ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
727       }
728     }
729 
730     for (uptr i = 0; i < kNumAllocs; i++) {
731       void *x = allocated[i];
732       uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
733       CHECK_NE(*meta, 0);
734       CHECK(a->PointerIsMine(x));
735       *meta = 0;
736       a->Deallocate(&cache, x);
737     }
738     allocated.clear();
739     a->SwallowCache(&cache);
740   }
741   a->DestroyCache(&cache);
742   a->TestOnlyUnmap();
743 }
744 
745 #if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64>();
}

TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
  TestCombinedAllocator<Allocator64Dynamic>();
}

#if !ALLOCATOR64_SMALL_SIZE
#if !SANITIZER_WINDOWS
// Windows fails to map 1TB, so disable this test.
TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
  ScopedPremappedHeap h;
  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
}
#endif

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
  TestCombinedAllocator<Allocator64VeryCompact>();
}
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
  TestCombinedAllocator<Allocator32Compact>();
}
776 
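// Verifies that draining a per-thread cache returns all chunks to the
// allocator, so repeated allocate/deallocate/drain cycles do not grow the
// total memory used.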
template <class Allocator>
void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
779   using AllocatorCache = typename Allocator::AllocatorCache;
780   AllocatorCache cache;
781   Allocator *a = new Allocator();
782 
783   a->Init(kReleaseToOSIntervalNever, premapped_heap);
784   memset(&cache, 0, sizeof(cache));
785   cache.Init(0);
786 
787   const uptr kNumAllocs = 10000;
788   const int kNumIter = 100;
789   uptr saved_total = 0;
790   for (int class_id = 1; class_id <= 5; class_id++) {
791     for (int it = 0; it < kNumIter; it++) {
792       void *allocated[kNumAllocs];
793       for (uptr i = 0; i < kNumAllocs; i++) {
794         allocated[i] = cache.Allocate(a, class_id);
795       }
796       for (uptr i = 0; i < kNumAllocs; i++) {
797         cache.Deallocate(a, class_id, allocated[i]);
798       }
799       cache.Drain(a);
800       uptr total_allocated = a->TotalMemoryUsed();
801       if (it)
802         CHECK_EQ(saved_total, total_allocated);
803       saved_total = total_allocated;
804     }
805   }
806 
807   a->TestOnlyUnmap();
808   delete a;
809 }
810 
811 #if SANITIZER_CAN_USE_ALLOCATOR64
812 // These tests can fail on Windows if memory is somewhat full and lit happens
813 // to run them all at the same time. FIXME: Make them not flaky and reenable.
814 #if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
}

#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
  ScopedPremappedHeap h;
  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64Compact>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<Allocator32Compact>();
}
842 
843 #if SANITIZER_CAN_USE_ALLOCATOR64
844 typedef Allocator64::AllocatorCache AllocatorCache;
845 static AllocatorCache static_allocator_cache;
846 
void *AllocatorLeakTestWorker(void *arg) {
848   typedef AllocatorCache::Allocator Allocator;
849   Allocator *a = (Allocator*)(arg);
850   static_allocator_cache.Allocate(a, 10);
851   static_allocator_cache.Drain(a);
852   return 0;
853 }
854 
TEST(SanitizerCommon, AllocatorLeakTest) {
856   typedef AllocatorCache::Allocator Allocator;
857   Allocator a;
858   a.Init(kReleaseToOSIntervalNever);
859   uptr total_used_memory = 0;
860   for (int i = 0; i < 100; i++) {
861     pthread_t t;
862     PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
863     PTHREAD_JOIN(t, 0);
864     if (i == 0)
865       total_used_memory = a.TotalMemoryUsed();
866     EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
867   }
868 
869   a.TestOnlyUnmap();
870 }
871 
872 // Struct which is allocated to pass info to new threads.  The new thread frees
873 // it.
874 struct NewThreadParams {
875   AllocatorCache *thread_cache;
876   AllocatorCache::Allocator *allocator;
877   uptr class_id;
878 };
879 
880 // Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
882   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
883   params->thread_cache->Deallocate(params->allocator, params->class_id, params);
884   return NULL;
885 }
886 
887 // The allocator cache is supposed to be POD and zero initialized.  We should be
888 // able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
890   AllocatorCache::Allocator allocator;
891   allocator.Init(kReleaseToOSIntervalNever);
892   AllocatorCache main_cache;
893   AllocatorCache child_cache;
894   memset(&main_cache, 0, sizeof(main_cache));
895   memset(&child_cache, 0, sizeof(child_cache));
896 
897   uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
898   NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
899       main_cache.Allocate(&allocator, class_id));
900   params->thread_cache = &child_cache;
901   params->allocator = &allocator;
902   params->class_id = class_id;
903   pthread_t t;
904   PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
905   PTHREAD_JOIN(t, 0);
906 
907   allocator.TestOnlyUnmap();
908 }
909 #endif
910 
TEST(Allocator, Basic) {
912   char *p = (char*)InternalAlloc(10);
913   EXPECT_NE(p, (char*)0);
914   char *p2 = (char*)InternalAlloc(20);
915   EXPECT_NE(p2, (char*)0);
916   EXPECT_NE(p2, p);
917   InternalFree(p);
918   InternalFree(p2);
919 }
920 
TEST(Allocator, Stress) {
922   const int kCount = 1000;
923   char *ptrs[kCount];
924   unsigned rnd = 42;
925   for (int i = 0; i < kCount; i++) {
926     uptr sz = my_rand_r(&rnd) % 1000;
927     char *p = (char*)InternalAlloc(sz);
928     EXPECT_NE(p, (char*)0);
929     ptrs[i] = p;
930   }
931   for (int i = 0; i < kCount; i++) {
932     InternalFree(ptrs[i]);
933   }
934 }
935 
TEST(Allocator, LargeAlloc) {
937   void *p = InternalAlloc(10 << 20);
938   InternalFree(p);
939 }
940 
TEST(Allocator, ScopedBuffer) {
942   const int kSize = 512;
943   {
944     InternalMmapVector<int> int_buf(kSize);
945     EXPECT_EQ((uptr)kSize, int_buf.size());
946   }
947   InternalMmapVector<char> char_buf(kSize);
948   EXPECT_EQ((uptr)kSize, char_buf.size());
949   internal_memset(char_buf.data(), 'c', kSize);
950   for (int i = 0; i < kSize; i++) {
951     EXPECT_EQ('c', char_buf[i]);
952   }
953 }
954 
void IterationTestCallback(uptr chunk, void *arg) {
956   reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
957 }
958 
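// Checks that ForEachChunk() reports every chunk that is currently allocated.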
template <class Allocator>
void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
961   Allocator *a = new Allocator;
962   a->Init(kReleaseToOSIntervalNever, premapped_heap);
963   typename Allocator::AllocatorCache cache;
964   memset(&cache, 0, sizeof(cache));
965   cache.Init(0);
966 
967   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
968     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
969 
970   std::vector<void *> allocated;
971 
972   // Allocate a bunch of chunks.
973   for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
974     uptr size = sizes[s];
975     if (!a->CanAllocate(size, 1)) continue;
976     // printf("s = %ld\n", size);
977     uptr n_iter = std::max((uptr)6, 80000 / size);
978     // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
979     for (uptr j = 0; j < n_iter; j++) {
980       uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
981       void *x = cache.Allocate(a, class_id0);
982       allocated.push_back(x);
983     }
984   }
985 
986   std::set<uptr> reported_chunks;
987   a->ForceLock();
988   a->ForEachChunk(IterationTestCallback, &reported_chunks);
989   a->ForceUnlock();
990 
991   for (uptr i = 0; i < allocated.size(); i++) {
992     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
993     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
994               reported_chunks.end());
995   }
996 
997   a->TestOnlyUnmap();
998   delete a;
999 }
1000 
1001 #if SANITIZER_CAN_USE_ALLOCATOR64
1002 // These tests can fail on Windows if memory is somewhat full and lit happens
1003 // to run them all at the same time. FIXME: Make them not flaky and reenable.
1004 #if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
  TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
  ScopedPremappedHeap h;
  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
}
#endif
#endif
#endif

TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}
1023 
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
1025   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1026   a.Init();
1027   AllocatorStats stats;
1028   stats.Init();
1029 
1030   static const uptr kNumAllocs = 1000;
1031   char *allocated[kNumAllocs];
1032   static const uptr size = 40;
1033   // Allocate some.
1034   for (uptr i = 0; i < kNumAllocs; i++)
1035     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1036 
1037   std::set<uptr> reported_chunks;
1038   a.ForceLock();
1039   a.ForEachChunk(IterationTestCallback, &reported_chunks);
1040   a.ForceUnlock();
1041 
1042   for (uptr i = 0; i < kNumAllocs; i++) {
1043     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
1044     ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
1045               reported_chunks.end());
1046   }
1047   for (uptr i = 0; i < kNumAllocs; i++)
1048     a.Deallocate(&stats, allocated[i]);
1049 }
1050 
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
1052   LargeMmapAllocator<NoOpMapUnmapCallback> a;
1053   a.Init();
1054   AllocatorStats stats;
1055   stats.Init();
1056 
1057   static const uptr kNumAllocs = 1024;
1058   static const uptr kNumExpectedFalseLookups = 10000000;
1059   char *allocated[kNumAllocs];
1060   static const uptr size = 4096;
1061   // Allocate some.
1062   for (uptr i = 0; i < kNumAllocs; i++) {
1063     allocated[i] = (char *)a.Allocate(&stats, size, 1);
1064   }
1065 
1066   a.ForceLock();
1067   for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
1068     // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
1069     char *p1 = allocated[i % kNumAllocs];
1070     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
1071     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
1072     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
1073     EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1074   }
1075 
1076   for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
1077     void *p = reinterpret_cast<void *>(i % 1024);
1078     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1079     p = reinterpret_cast<void *>(~0L - (i % 1024));
1080     EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1081   }
1082   a.ForceUnlock();
1083 
1084   for (uptr i = 0; i < kNumAllocs; i++)
1085     a.Deallocate(&stats, allocated[i]);
1086 }
1087 
1088 
1089 // Don't test OOM conditions on Win64 because it causes other tests on the same
1090 // machine to OOM.
1091 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
1092 typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap;
1093 template <typename AddressSpaceViewTy = LocalAddressSpaceView>
1094 struct AP64_SpecialSizeClassMap {
1095   static const uptr kSpaceBeg = kAllocatorSpace;
1096   static const uptr kSpaceSize = kAllocatorSize;
1097   static const uptr kMetadataSize = 0;
1098   typedef SpecialSizeClassMap SizeClassMap;
1099   typedef NoOpMapUnmapCallback MapUnmapCallback;
1100   static const uptr kFlags = 0;
1101   using AddressSpaceView = AddressSpaceViewTy;
1102 };
1103 
1104 // Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
1106   // In a world where regions are small and chunks are huge...
1107   typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
1108   const uptr kRegionSize =
1109       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
1110   SpecialAllocator64 *a = new SpecialAllocator64;
1111   a->Init(kReleaseToOSIntervalNever);
1112   SpecialAllocator64::AllocatorCache cache;
1113   memset(&cache, 0, sizeof(cache));
1114   cache.Init(0);
1115 
1116   // ...one man is on a mission to overflow a region with a series of
1117   // successive allocations.
1118 
1119   const uptr kClassID = ALLOCATOR64_SMALL_SIZE ? 18 : 24;
1120   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1121   ASSERT_LT(2 * kAllocationSize, kRegionSize);
1122   ASSERT_GT(3 * kAllocationSize, kRegionSize);
1123   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1124   EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
1125   EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1126 
1127   const uptr Class2 = ALLOCATOR64_SMALL_SIZE ? 15 : 21;
1128   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1129   ASSERT_EQ(Size2 * 8, kRegionSize);
1130   char *p[7];
1131   for (int i = 0; i < 7; i++) {
1132     p[i] = (char*)cache.Allocate(a, Class2);
1133     EXPECT_NE(p[i], nullptr);
1134     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1135     p[i][Size2 - 1] = 42;
1136     if (i) ASSERT_LT(p[i - 1], p[i]);
1137   }
1138   EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
1139   cache.Deallocate(a, Class2, p[0]);
1140   cache.Drain(a);
1141   ASSERT_EQ(p[6][Size2 - 1], 42);
1142   a->TestOnlyUnmap();
1143   delete a;
1144 }
1145 
1146 #endif
1147 
1148 #if SANITIZER_CAN_USE_ALLOCATOR64
1149 
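// Memory mapper stub that only records the size (in bytes) of the requested
// counter buffer and never maps real memory.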
1150 class NoMemoryMapper {
1151  public:
1152   uptr last_request_buffer_size = 0;
1153 
  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1155     last_request_buffer_size = buffer_size * sizeof(u64);
1156     return nullptr;
1157   }
1158 };
1159 
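// Memory mapper that hands out a single page surrounded by no-access guard
// pages, so any out-of-bounds access by the counter array faults immediately.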
1160 class RedZoneMemoryMapper {
1161  public:
  RedZoneMemoryMapper() {
1163     const auto page_size = GetPageSize();
1164     buffer = MmapOrDie(3ULL * page_size, "");
1165     MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
1166     MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1167   }
  ~RedZoneMemoryMapper() { UnmapOrDie(buffer, 3 * GetPageSize()); }
1169 
  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1171     buffer_size *= sizeof(u64);
1172     const auto page_size = GetPageSize();
1173     CHECK_EQ(buffer_size, page_size);
1174     u64 *p =
1175         reinterpret_cast<u64 *>(reinterpret_cast<uptr>(buffer) + page_size);
1176     memset(p, 0, page_size);
1177     return p;
1178   }
1179 
1180  private:
1181   void *buffer;
1182 };
1183 
TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
1185   NoMemoryMapper no_memory_mapper;
1186   for (int i = 0; i < 64; i++) {
1187     // Various valid counter's max values packed into one word.
1188     Allocator64::PackedCounterArray counters_2n(1, 1ULL << i,
1189                                                 &no_memory_mapper);
1190     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1191 
1192     // Check the "all bit set" values too.
1193     Allocator64::PackedCounterArray counters_2n1_1(1, ~0ULL >> i,
1194                                                    &no_memory_mapper);
1195     EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
1196 
1197     // Verify the packing ratio, the counter is expected to be packed into the
1198     // closest power of 2 bits.
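    // E.g. for i == 2 the max value is 4, which needs 3 bits and is stored in
    // 4 bits, so 64 counters require 64 * 4 / 8 == 32 == 8 * 4 bytes.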
1199     Allocator64::PackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
1200     EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1201               no_memory_mapper.last_request_buffer_size);
1202   }
1203 
1204   RedZoneMemoryMapper memory_mapper;
1205   // Go through 1, 2, 4, 8, .. 64 bits per counter.
1206   for (int i = 0; i < 7; i++) {
1207     // Make sure counters request one memory page for the buffer.
1208     const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
1209     Allocator64::PackedCounterArray counters(
1210         kNumCounters, 1ULL << ((1 << i) - 1), &memory_mapper);
1211     counters.Inc(0);
1212     for (u64 c = 1; c < kNumCounters - 1; c++) {
1213       ASSERT_EQ(0ULL, counters.Get(c));
1214       counters.Inc(c);
1215       ASSERT_EQ(1ULL, counters.Get(c - 1));
1216     }
1217     ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
1218     counters.Inc(kNumCounters - 1);
1219 
1220     if (i > 0) {
1221       counters.IncRange(0, kNumCounters - 1);
1222       for (u64 c = 0; c < kNumCounters; c++)
1223         ASSERT_EQ(2ULL, counters.Get(c));
1224     }
1225   }
1226 }
1227 
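// Records released ranges as a string in which 'x' marks a released page and
// '.' marks a page that was kept.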
1228 class RangeRecorder {
1229  public:
1230   std::string reported_pages;
1231 
  RangeRecorder()
1233       : page_size_scaled_log(
1234             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1235         last_page_reported(0) {}
1236 
  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1238     from >>= page_size_scaled_log;
1239     to >>= page_size_scaled_log;
1240     ASSERT_LT(from, to);
1241     if (!reported_pages.empty())
1242       ASSERT_LT(last_page_reported, from);
1243     reported_pages.append(from - last_page_reported, '.');
1244     reported_pages.append(to - from, 'x');
1245     last_page_reported = to;
1246   }
1247 
1248  private:
1249   const uptr page_size_scaled_log;
1250   u32 last_page_reported;
1251 };
1252 
TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
1254   typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
1255 
1256   // 'x' denotes a page to be released, '.' denotes a page to be kept around.
1257   const char* test_cases[] = {
1258       "",
1259       ".",
1260       "x",
1261       "........",
1262       "xxxxxxxxxxx",
1263       "..............xxxxx",
1264       "xxxxxxxxxxxxxxxxxx.....",
1265       "......xxxxxxxx........",
1266       "xxx..........xxxxxxxxxxxxxxx",
1267       "......xxxx....xxxx........",
1268       "xxx..........xxxxxxxx....xxxxxxx",
1269       "x.x.x.x.x.x.x.x.x.x.x.x.",
1270       ".x.x.x.x.x.x.x.x.x.x.x.x",
1271       ".x.x.x.x.x.x.x.x.x.x.x.x.",
1272       "x.x.x.x.x.x.x.x.x.x.x.x.x",
1273   };
1274 
1275   for (auto test_case : test_cases) {
1276     RangeRecorder range_recorder;
1277     RangeTracker tracker(&range_recorder, 1);
1278     for (int i = 0; test_case[i] != 0; i++)
1279       tracker.NextPage(test_case[i] == 'x');
1280     tracker.Done();
1281     // Strip trailing '.'-pages before comparing the results as they are not
1282     // going to be reported to range_recorder anyway.
1283     const char* last_x = strrchr(test_case, 'x');
1284     std::string expected(
1285         test_case,
1286         last_x == nullptr ? 0 : (last_x - test_case + 1));
1287     EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
1288   }
1289 }
1290 
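// Memory mapper that records which scaled page offsets were reported as
// released.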
1291 class ReleasedPagesTrackingMemoryMapper {
1292  public:
1293   std::set<u32> reported_pages;
1294   std::vector<u64> buffer;
1295 
  u64 *MapPackedCounterArrayBuffer(uptr buffer_size) {
1297     reported_pages.clear();
1298     buffer.assign(buffer_size, 0);
1299     return buffer.data();
1300   }
  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1302     uptr page_size_scaled =
1303         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
1304     for (u32 i = from; i < to; i += page_size_scaled)
1305       reported_pages.insert(i);
1306   }
1307 };
1308 
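// Builds a random free list for every size class, calls ReleaseFreeMemoryToOS
// and cross-checks the released pages against the free list: used chunks must
// not touch released pages, and fully free pages must have been released.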
template <class Allocator>
void TestReleaseFreeMemoryToOS() {
1311   ReleasedPagesTrackingMemoryMapper memory_mapper;
1312   const uptr kAllocatedPagesCount = 1024;
1313   const uptr page_size = GetPageSizeCached();
1314   const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
1315   std::mt19937 r;
1316   uint32_t rnd_state = 42;
1317 
1318   for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
1319       class_id++) {
1320     const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
1321     const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
1322     const uptr max_chunks =
1323         kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
1324 
1325     // Generate the random free list.
1326     std::vector<u32> free_array;
1327     bool in_free_range = false;
1328     uptr current_range_end = 0;
1329     for (uptr i = 0; i < max_chunks; i++) {
1330       if (i == current_range_end) {
1331         in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
1332         current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1333       }
1334       if (in_free_range)
1335         free_array.push_back(i * chunk_size_scaled);
1336     }
1337     if (free_array.empty())
1338       continue;
1339     // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on
1340     // the list ordering.
1341     std::shuffle(free_array.begin(), free_array.end(), r);
1342 
1343     Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
1344                                      chunk_size, kAllocatedPagesCount,
1345                                      &memory_mapper, class_id);
1346 
1347     // Verify that there are no released pages touched by used chunks and all
1348     // ranges of free chunks big enough to contain the entire memory pages had
1349     // these pages released.
1350     uptr verified_released_pages = 0;
1351     std::set<u32> free_chunks(free_array.begin(), free_array.end());
1352 
1353     u32 current_chunk = 0;
1354     in_free_range = false;
1355     u32 current_free_range_start = 0;
1356     for (uptr i = 0; i <= max_chunks; i++) {
1357       bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
1358 
1359       if (is_free_chunk) {
1360         if (!in_free_range) {
1361           in_free_range = true;
1362           current_free_range_start = current_chunk;
1363         }
1364       } else {
1365         // Verify that this used chunk does not touch any released page.
1366         for (uptr i_page = current_chunk / page_size_scaled;
1367              i_page <= (current_chunk + chunk_size_scaled - 1) /
1368                        page_size_scaled;
1369              i_page++) {
1370           bool page_released =
1371               memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
1372               memory_mapper.reported_pages.end();
1373           ASSERT_EQ(false, page_released);
1374         }
1375 
1376         if (in_free_range) {
1377           in_free_range = false;
1378           // Verify that all entire memory pages covered by this range of free
1379           // chunks were released.
1380           u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1381           while (page + page_size_scaled <= current_chunk) {
1382             bool page_released =
1383                 memory_mapper.reported_pages.find(page) !=
1384                 memory_mapper.reported_pages.end();
1385             ASSERT_EQ(true, page_released);
1386             verified_released_pages++;
1387             page += page_size_scaled;
1388           }
1389         }
1390       }
1391 
1392       current_chunk += chunk_size_scaled;
1393     }
1394 
1395     ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
1396   }
1397 }
1398 
TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64>();
}

#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
#endif  // !ALLOCATOR64_SMALL_SIZE
1412 
1413 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
1414 
TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) {
1416   // When allocating a memory block slightly bigger than a memory page and
1417   // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round
1418   // the size up to the page size, so that subsequent calls to the allocator
1419   // can use the remaining space in the last allocated page.
1420   static LowLevelAllocator allocator;
1421   char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
1422   char *ptr2 = (char *)allocator.Allocate(16);
1423   EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16);
1424 }
1425 
1426 #endif  // #if !SANITIZER_DEBUG
1427