//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
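// kMagicN is kMagic1 replicated to fill N bytes, so a single wide store can
// stamp a whole run of shadow bytes at once. For illustration: assuming
// kAsanStackAfterReturnMagic is 0xf5 (its value in asan_internal.h), kMagic2
// is 0xf5f5, kMagic4 is 0xf5f5f5f5, and kMagic8 is 0xf5f5f5f5f5f5f5f5.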

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes, inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects SHADOW_SCALE=3.
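    // At SHADOW_SCALE=3 each u64 store below writes 8 shadow bytes, which
    // cover 64 application bytes, so (1 << class_id) stores cover the whole
    // 2^(class_id + 6)-byte frame. For class_id <= 6 that is at most 64
    // stores, which is cheaper than a PoisonShadow call.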
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big; it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr; // We are out of fake stack.
}
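
// The FakeStack region consists of kNumberOfSizeClasses contiguous
// sub-regions of 2^stack_size_log bytes each; size class i holds frames of
// 2^(kMinStackFrameSizeLog + i) bytes. The arithmetic below inverts that
// layout: the high bits of the offset from 'beg' select the class, and the
// bits below them select the frame index within the class. (A sketch of the
// layout as implied by the index math here; see asan_fake_stack.h for the
// authoritative picture.)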
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp, or similar event happens, OnFree() is not called,
// so we may leak one or more fake frames. The good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect garbage
// frames. We do it based on their 'real_stack' values: everything that is
// lower than the current real_stack is garbage.
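// This works because the thread stack grows down: a frame recorded with a
// lower real_stack belonged to a deeper call that has since returned or
// jumped away. Example (hypothetical addresses): a fake frame allocated with
// real_stack == 0x7fff00001000 inside a callee that longjmp'd out is
// reclaimed when GC() later runs with real_stack == 0x7fff00002000.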
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

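// On the platforms selected below, cache the FakeStack pointer in a fast
// THREADLOCAL slot so the hot path in GetFakeStackFast() avoids a full
// GetCurrentThread() lookup; elsewhere the cache is disabled and every
// lookup goes through the thread object. (My reading of the #if condition;
// the exact platform rationale is an assumption.)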
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}
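
// Note the ordering above: the TLS slot is consulted before the runtime
// flag, so a thread that already owns a fake stack keeps using it regardless
// of how __asan_option_detect_stack_use_after_return reads at that moment.
// (My reading of the code, not a documented contract.)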

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
  __asan_stack_malloc_##class_id(uptr size) {                                 \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
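
// Sketch of how compiler instrumentation is expected to use these entry
// points (a hand-written illustration, not actual compiler output): for a
// function whose frame fits in size class 3, the prologue tries the fake
// stack and falls back to the real frame when it is exhausted or disabled:
//
//   uptr fake = __asan_stack_malloc_3(kFrameSize);
//   void *frame = fake ? (void *)fake : alloca(kFrameSize);
//   ... function body uses 'frame' for its locals ...
//   if (fake) __asan_stack_free_3(fake, kFrameSize);
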
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
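
// Sketch of the intended use of the two entry points above by an external
// tool such as a debugger plugin (a hand-written illustration):
//
//   void *fs = __asan_get_current_fake_stack();
//   void *beg, *end;
//   if (void *real = __asan_addr_is_in_fake_stack(fs, addr, &beg, &end)) {
//     // addr lies inside a live fake frame [beg, end); 'real' is the real
//     // stack address recorded when the frame was allocated.
//   }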

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
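
// Worked example of the redzone math above, assuming SHADOW_GRANULARITY == 8
// (hypothetical numbers): addr == 0x1000, size == 0x64. Then
//   LeftRedzoneAddr  == 0xfe0   -> [0xfe0, 0x1000) poisoned as left redzone,
//   PartialRzAddr    == 0x1064,
//   RightRzAddr      == 0x1080  -> [0x1080, 0x10a0) poisoned as right redzone,
//   PartialRzAligned == 0x1060  -> the tail [0x1064, 0x1080) is poisoned by
//                                  the partial-right-redzone call.
// The user region [0x1000, 0x1064) stays addressable.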

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
} // extern "C"