//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. An index is a u32, that is, half the size
// of an uptr (hence the Dense prefix).
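//
// A minimal usage sketch (MyNode and the size parameters are hypothetical,
// for illustration only; note that sizeof(T) must exceed sizeof(u32)):
//
//   typedef DenseSlabAlloc<MyNode, /*kL1Size=*/128, /*kL2Size=*/1 << 16>
//       NodeAlloc;
//   static NodeAlloc alloc(LINKER_INITIALIZED, "node alloc");
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);  // compact 32-bit handle, never 0
//   MyNode *p = alloc.Map(idx);     // resolve the handle to a pointer
//   alloc.Free(&cache, idx);        // recycle the handle via the cache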
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"

namespace __tsan {

class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};

template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
    freelist_ = 0;
    fillpos_ = 0;
    name_ = name;
  }

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // map_ can be very large; zero it only here so that linker-initialized
    // objects (whose storage is already zeroed) don't page it in.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

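  // Pops a free index from the thread-local cache, refilling the cache
  // from the global freelist (under the global mutex) when it is empty.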
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

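  // Returns an index to the thread-local cache, spilling half of the
  // cache to the global freelist first if the cache is full. Index 0 is
  // reserved as invalid and must never be freed.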
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

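  // Translates an index to a pointer through the two-level map_: the high
  // bits of idx select the L1 block, the low bits select the slot within
  // it. Both operations reduce to shifts/masks since the sizes are
  // powers of two.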
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

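  // Returns all cached indices to the global freelist, leaving the
  // cache empty.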
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_;
  uptr fillpos_;
  const char *name_;

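  // Moves up to kSize/2 indices from the global freelist into the cache,
  // mapping and initializing a fresh L2 block first if the freelist is
  // empty. A free slot stores the index of the next free slot in its
  // first sizeof(IndexT) bytes, forming an intrusive freelist (hence the
  // sizeof(T) > sizeof(IndexT) requirement above).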
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
          name_, fillpos_, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

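  // Spills half of the cache back to the global freelist, so that after
  // a Free() on a full cache the cache is half empty and subsequent
  // Alloc/Free calls keep hitting the cache.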
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H