//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
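
// For reference: kAsanStackAfterReturnMagic is a single shadow magic byte
// (0xf5 at the time of writing), so the constants above simply replicate it
// across 2, 4 and 8 bytes, e.g. kMagic8 == 0xf5f5f5f5f5f5f5f5.  This lets
// SetShadow() below fill shadow memory one u64 store at a time.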

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes, inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big; it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
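
// Worked example for SetShadow() above (illustrative; assumes
// kMinStackFrameSizeLog == 6): with SHADOW_SCALE == 3 one shadow byte covers
// 8 application bytes, so a single u64 store covers 64 of them.  A class_id
// frame spans 64 << class_id bytes, i.e. exactly (1 << class_id) u64 stores,
// which is the loop bound above: class_id == 2 (256-byte frames) takes 4
// stores, while classes above 6 fall through to PoisonShadow().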

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}
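
// Sizing note for Create() above (derived from the address math in
// AddrIsInFakeStack() below): each size class occupies (1 << stack_size_log)
// bytes, so the 64-bit cap of stack_size_log == 28 maps roughly 11 * 256MB
// of address space for the 11 size classes (ids 0..10, matching the
// interface functions at the bottom of this file).  This is why
// uar_noreserve matters: a noreserve mapping keeps those pages uncommitted
// until they are actually touched.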

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const uptr num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (uptr i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with a regular non-atomic load and store (at least I was not able to
    // make this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr;  // We are out of fake stack.
}
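
// In effect, flags is a per-class occupancy byte map and hint_position_
// turns the scan in Allocate() above into a ring-buffer walk: each search
// resumes where the previous one stopped and wraps via
// ModuloNumberOfFrames(), so a just-freed slot is not immediately reused
// and stale frames stay poisoned long enough to catch use-after-return.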

uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}
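
// Worked example for AddrIsInFakeStack() above (illustrative; assumes
// kMinStackFrameSizeLog == 6): with stack_size_log == 20 each class occupies
// 1MB, so for ptr == beg + (1 << 20) + 300:
//   class_id = (ptr - beg) >> 20 == 1   (class 1: 128-byte frames)
//   base     = beg + (1 << 20)
//   pos      = 300 >> (6 + 1)    == 2
//   res      = base + 2 * 128    == base + 256
// and *frame_beg then skips the FakeFrame header at the start of the frame.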

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp or some such happens, we don't call OnFree() and as
// a result may leak one or more fake frames. The good news is that we are
// notified about all such events by HandleNoReturn(), so if we recently had
// a no-return event we need to collect the garbage frames. We do it based
// on their 'real_stack' values: since the stack grows down, every frame
// whose real_stack is lower than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return nullptr; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
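
// A hedged sketch of how instrumented code is expected to drive the entry
// points above (illustrative only; the real calls are emitted by the ASan
// instrumentation pass, not written by hand, and reserve_on_real_stack() is
// a hypothetical fallback helper):
//
//   uptr fake = __asan_stack_malloc_1(128);  // class 1 holds 128-byte frames
//   uptr frame_base = fake ? fake : reserve_on_real_stack();
//   ...
//   if (fake) __asan_stack_free_1(fake, 128);  // epilogue: retire the frame
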
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
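
// A hedged usage sketch for the two functions above (e.g. from a debugger
// script; suspect_addr is a hypothetical address being diagnosed):
//
//   void *beg, *end;
//   void *fs = __asan_get_current_fake_stack();
//   if (fs && __asan_addr_is_in_fake_stack(fs, suspect_addr, &beg, &end)) {
//     // suspect_addr lies inside a live fake frame spanning [beg, end);
//     // the non-null return value is the matching real stack address.
//   }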

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
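
// Worked example for __asan_alloca_poison() above (illustrative; assumes
// SHADOW_GRANULARITY == 8): for addr == 0x1000 and size == 17:
//   LeftRedzoneAddr  == 0x0fe0  (32-byte left redzone, fully poisoned)
//   PartialRzAddr    == 0x1011  (first byte past the object)
//   RightRzAddr      == 0x1020  (0x1011 rounded up to a 32-byte boundary)
//   PartialRzAligned == 0x1010
// so [0x1011, 0x1020) becomes a partial right redzone, [0x1020, 0x1040) a
// full one, and only [0x1000, 0x1011) stays addressable.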

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
}  // extern "C"