//===-- asan_poisoning.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

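// Note on the shadow encoding used below: each shadow byte describes
// SHADOW_GRANULARITY (typically 8) application bytes. A shadow value of 0
// means the whole granule is addressable, a value k in [1, SHADOW_GRANULARITY)
// means only the first k bytes are addressable, and a negative (magic) value,
// e.g. kAsanUserPoisonedMemoryMagic, means the granule is fully poisoned.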
struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};

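// Illustrative note, assuming the default compacting mapping
// Shadow(addr) == (addr >> SHADOW_SCALE) + SHADOW_OFFSET with SHADOW_SCALE 3:
// a page-aligned 4096-byte application region maps to only 512 shadow bytes,
// so the corresponding shadow range is generally not page-aligned; only the
// shadow pages fully covered by [MemToShadow(p), MemToShadow(p + size)) can
// be released back to the OS.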
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  // Since asan's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

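// Poisons or unpoisons an intra-object redzone: a small redzone placed inside
// a single object (for example, padding between class fields when field
// padding instrumentation is enabled) so that an overflow from one field into
// a neighboring one can be detected.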
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", ptr, end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, SHADOW_GRANULARITY));
  if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
    ptr |= SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons only memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the asan shadow
// mapping invariant is preserved (see the detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if the user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
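//
// Illustrative example (assuming SHADOW_GRANULARITY == 8 and 8-aligned p):
// poisoning [p, p + 13) is guaranteed to poison at least [p, p + 8), i.e. the
// fully covered granule, while unpoisoning [p + 3, p + 13) may unpoison as
// much as [p, p + 13), i.e. the whole first granule plus the partial tail.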
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if the byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if they are not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

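// Returns the address of the first poisoned byte in [beg, beg + size), or 0
// if the whole region is addressable.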
uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (SANITIZER_MYRIAD2) {
    // On Myriad, addresses that are not in the DRAM range need to be treated
    // as unpoisoned.
    if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
    if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
  } else {
    if (!AddrIsInMem(beg)) return beg;
    if (!AddrIsInMem(end)) return end;
  }
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}

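// Checks a small [p, p + size) region for poisoned bytes and, if any are
// found, reports an invalid access of the given size at the first bad
// address. Used by the __sanitizer_unaligned_* load/store helpers below.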
#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

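// C++ array cookie handling. When an array of objects with non-trivial
// destructors is allocated with new[], the compiler typically stores the
// element count (the "cookie") in a word just before the array so that
// delete[] knows how many destructors to run. Poisoning the cookie's shadow
// lets ASan catch code that reads or writes the cookie directly.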
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

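// The __asan_set_shadow_* functions below are called by instrumented code to
// fill a shadow range with a fixed value. The magic values correspond to the
// shadow constants in asan_internal.h: 0x00 is addressable memory,
// 0xf1/0xf2/0xf3 are the stack left/mid/right redzones, 0xf5 is
// stack-after-return, and 0xf8 is stack-use-after-scope.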
void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

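// Usage sketch (illustrative): for a vector-like container with storage
// [data, data + capacity) whose in-use size changes from old_size to new_size,
// the container would call
//   __sanitizer_annotate_contiguous_container(data, data + capacity,
//                                             data + old_size,
//                                             data + new_size);
// so that [data, data + new_size) stays addressable and the unused capacity
// behind it is poisoned. libc++ makes calls of this form from std::vector
// when ASan container annotations are enabled.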
void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending with end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan