//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

namespace {
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace

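// Copies the trace into the frame storage and returns a compact Id.
// *pack is set to the number of blocks that became fully stored during this
// call and are therefore ready to be packed.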
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  // No more space.
  if (stack_trace == nullptr)
    return 0;
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}

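// Looks up a previously stored trace by Id, transparently unpacking the
// owning block if it has been compressed in the meantime.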
StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}

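// Reserves `count` consecutive frame slots within a single block via a
// lock-free bump of total_frames_. Slots that would straddle a block boundary
// are given up (marked as stored) and the allocation is retried.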
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially try to bump the
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      // No more available blocks. Indicate inability to allocate more memory.
      if (block_idx >= ARRAY_SIZE(blocks_))
        return nullptr;
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

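// Compresses every block that is completely filled and returns the total
// number of bytes released back to the OS.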
uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us, justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}

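// Output iterator that delta-encodes consecutive values as SLEB128 bytes:
// each assignment writes the signed difference from the previous value.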
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

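// Input iterator that reverses SLeb128Encoder: each dereference decodes one
// SLEB128 delta and accumulates it into the running value.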
class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};

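// Plain delta compression: stream the frames through the SLEB128 delta
// encoder/decoder above without any dictionary step.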
static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

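// LZW compression layered on the same SLEB128 delta stream: frames are run
// through LzwEncode/LzwDecode and the resulting codes are serialized with the
// encoder/decoder above.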
static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
#  pragma warning(disable : 4200)
#endif
namespace {
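// Header placed at the start of a packed block mapping. The compressed
// payload follows in `data`, and `size` covers the header plus the payload.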
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(pop)
#endif

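// Returns a pointer to the uncompressed frames, first decompressing the block
// into a fresh read-only mapping if it is currently packed.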
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}

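// Compresses a fully stored block into a new, smaller read-only mapping and
// releases the original. Returns the number of bytes saved, or 0 if the block
// is not ready or compression would not pay off.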
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}

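// Accounts for `n` newly stored frames and returns true exactly once, when
// the block becomes completely filled and is ready to be packed.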
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

}  // namespace __sanitizer