//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {
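
// Illustrative usage sketch (assumed caller-side code, not part of this file;
// `trace` is a hypothetical StackTrace provided by the caller):
//   StackStore store;
//   uptr pack = 0;
//   StackStore::Id id = store.Store(trace, &pack);
//   if (pack)  // Some block became full; compressing it now is worthwhile.
//     store.Pack(StackStore::Compression::LZW);
//   StackTrace copy = store.Load(id);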

namespace {
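// Packs a trace's frame count and tag into a single word that prefixes the
// stored frames. The frame count is clamped to fit into 8 bits.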
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace

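// Stores the trace as a header word followed by its frame addresses and
// returns an Id encoding the frame offset (0 is reserved for the empty
// trace). *pack receives the number of blocks that this call fills
// completely, i.e. that are now eligible for Pack().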
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}

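// Reconstructs the trace stored under `id`, transparently unpacking the
// containing block if it was compressed. Id 0 yields an empty trace.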
StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}

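// Reserves `count` consecutive frame slots within a single block and returns
// a pointer to the first slot; *idx receives the global frame offset. A range
// that would straddle a block boundary is abandoned (its pieces are marked as
// stored so packing of those blocks is not delayed) and the allocation is
// retried.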
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially, try to bump
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us; the justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}

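// Output-iterator-style adapter: each value assigned through it is written as
// the SLEB128-encoded delta from the previously written value.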
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

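// Matching input-iterator-style adapter: decodes SLEB128 deltas and yields the
// reconstructed absolute values.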
class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};

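// Delta compression: each frame is stored as the SLEB128-encoded difference
// from its predecessor, which tends to be small for nearby code addresses.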
static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

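// LZW compression of the frame values; the emitted code words are themselves
// delta/SLEB128-encoded through SLeb128Encoder.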
static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
#  pragma warning(disable : 4200)
#endif
namespace {
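// Header of a packed block: total size of the packed data in bytes (header
// included), the compression type, and the compressed payload.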
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(pop)
#endif

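// Returns a pointer to the block's uncompressed frames. If the block is
// packed, it is decompressed into a fresh read-only mapping and the packed
// copy is unmapped.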
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}

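// Compresses a completely filled block and returns the number of bytes
// released back to the OS. Does nothing for blocks that are already packed,
// permanently unpacked, or not fully stored yet; gives up if packing would
// save less than 1/8 of the block.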
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}

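// Accounts for `n` more frames written into this block; returns true iff the
// running total has reached kBlockSizeFrames, i.e. the block is completely
// filled and ready to be packed.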
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

}  // namespace __sanitizer