//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"

#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>
#include <sys/prctl.h>
#include <errno.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#endif

namespace __hwasan {

static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
  CHECK_EQ((beg % GetMmapGranularity()), 0);
  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
  uptr size = end - beg + 1;
  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
  if (!MmapFixedNoReserve(beg, size, name)) {
    Report(
        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
        "Perhaps you're using ulimit -v\n",
        size);
    Abort();
  }
}

static void ProtectGap(uptr addr, uptr size) {
  if (!size)
    return;
  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
  if (addr == (uptr)res)
    return;
  // A few pages at the start of the address space cannot be protected, but we
  // really want to protect as much as possible, to prevent this memory from
  // being returned as a result of a non-FIXED mmap().
  if (addr == 0) {
    uptr step = GetMmapGranularity();
    while (size > step) {
      addr += step;
      size -= step;
      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
      if (addr == (uptr)res)
        return;
    }
  }

  Report(
      "ERROR: Failed to protect shadow gap [%p, %p]. "
      "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
      (void *)(addr + size));
  DumpProcessMap();
  Die();
}

static uptr kLowMemStart;
static uptr kLowMemEnd;
static uptr kLowShadowEnd;
static uptr kLowShadowStart;
static uptr kHighShadowStart;
static uptr kHighShadowEnd;
static uptr kHighMemStart;
static uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}
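
// For orientation: assuming kShadowScale == 4 and a hypothetical shadow base
// of 0x100000000000 on a 48-bit address space (the real base is picked
// dynamically at startup), the table printed above would read:
//
//   || [0x200000000000, 0xffffffffffff] || HighMem    ||
//   || [0x120000000000, 0x1fffffffffff] || HighShadow ||
//   || [0x110000000000, 0x11ffffffffff] || ShadowGap  ||
//   || [0x100000000000, 0x10ffffffffff] || LowShadow  ||
//   || [0x000000000000, 0x0fffffffffff] || LowMem     ||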

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}
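
// A worked example of the OR trick above, assuming a 4 KiB mmap granularity
// and kShadowScale == 4: GetMmapGranularity() << kShadowScale == 0x10000, so
// the OR sets the low 16 bits and kHighMemEnd lands one byte below a 64 KiB
// boundary, keeping both HighMem and its shadow granularity-aligned:
//
//   uptr max_address = 0xfffffffffff8;  // hypothetical raw maximum
//   max_address |= (0x1000 << 4) - 1;   // |= 0xffff
//   // max_address == 0xffffffffffff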

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}
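
// For reference, a minimal sketch of the shadow mapping this base feeds into
// (the real definitions live in hwasan_mapping.h; kShadowScale == 4 means one
// shadow byte describes a 16-byte granule):
//
//   uptr MemToShadow(uptr untagged_addr) {
//     return (untagged_addr >> kShadowScale) +
//            __hwasan_shadow_memory_dynamic_address;
//   }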

void InitPrctl() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  // Check we're running on a kernel that can use the tagged address ABI.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
      errno == EINVAL) {
#if SANITIZER_ANDROID
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while
    // still allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is
    // the case.
    return;
#else
    Printf(
        "FATAL: "
        "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
    Die();
#endif
  }

  // Turn on the tagged address ABI.
  if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
          (uptr)-1 ||
      !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
    Printf(
176         "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
177         "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
178         "configuration.\n");
    Die();
  }
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
#undef PR_TAGGED_ADDR_ENABLE
}
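
// To check a kernel by hand, the same probe can be run from a standalone
// program with the libc prctl wrapper (a hedged sketch mirroring the logic
// above, not part of the runtime):
//
//   #include <errno.h>
//   #include <stdio.h>
//   #include <sys/prctl.h>
//   #ifndef PR_GET_TAGGED_ADDR_CTRL
//   #define PR_GET_TAGGED_ADDR_CTRL 56
//   #endif
//   int main() {
//     // EINVAL from the GET means the kernel predates the tagged-addr prctl.
//     if (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == -1 && errno == EINVAL)
//       puts("no tagged address ABI control");
//     else
//       puts("tagged address ABI control present");
//     return 0;
//   }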

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads");
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}
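
// The arithmetic above, spelled out with hypothetical round numbers (assuming
// kShadowBaseAlignment == 32 and a 4 KiB mmap granularity): the per-thread
// state lives in the block immediately below the shadow base, with one
// inaccessible guard page at its top.
//
//   uptr base  = 0x100000000000;      // __hwasan_shadow_memory_dynamic_address
//   uptr start = base - (1ULL << 32); // 0x0fff00000000
//   uptr end   = base - 0x1000;       // 0x0ffffffff000
//   // Threads get [start, end); [end, base) is the protected guard gap.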

static void MadviseShadowRegion(uptr beg, uptr end) {
  uptr size = end - beg + 1;
  SetShadowRegionHugePageMode(beg, size);
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(beg, size);
}

void MadviseShadow() {
  MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
  MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
}

bool MemIsApp(uptr p) {
  CHECK(GetTagFromPointer(p) == 0);
  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}

static void HwasanAtExit(void) {
  if (common_flags()->print_module_map)
    DumpProcessMap();
  if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
    ReportStats();
  if (hwasan_report_count > 0) {
    // ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(HwasanAtExit);
}

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->InitRandomState();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}
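
// Why the destructor counts down: pthreads re-runs a key's destructor on the
// next teardown pass whenever its slot has been reset to a non-null value
// (up to PTHREAD_DESTRUCTOR_ITERATIONS passes). Re-arming the slot with
// (iterations - 1) keeps this thread's HWASan state alive until other keys'
// destructors have had every chance to run; only the final pass calls
// __hwasan_thread_exit(). A sketch of the lifecycle:
//
//   HwasanTSDInit();        // once: pthread_key_create(&tsd_key, HwasanTSDDtor)
//   HwasanTSDThreadInit();  // per thread: slot = GetPthreadDestructorIterations()
//   // On pthread_exit, HwasanTSDDtor fires once per pass, counting the slot
//   // down, and tears the thread down only when it reaches 1.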

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#endif

#if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() {
  return (uptr *)get_android_tls_ptr();
}
#else
uptr *GetCurrentThreadLongPtr() {
  return &__hwasan_tls;
}
#endif

#if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#else
void AndroidTestTlsSlot() {}
#endif

Thread *GetCurrentThread() {
  auto *R = (StackAllocationsRingBuffer *)GetCurrentThreadLongPtr();
  return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next()));
}
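
// The TLS word pulls double duty: the slot itself is reinterpreted as this
// thread's StackAllocationsRingBuffer, and the write cursor returned by
// Next() lands inside the per-thread block reserved by InitThreads(), which
// is what lets GetThreadByBufferAddress() recover the owning Thread. A hedged
// usage sketch:
//
//   Thread *t = GetCurrentThread();  // may be null before __hwasan_thread_enter
//   if (t)
//     Printf("current hwasan thread: %p\n", (void *)t);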

struct AccessInfo {
  uptr addr;
  uptr size;
  bool is_store;
  bool is_load;
  bool recover;
};
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform-dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error
  // is recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that the access size is passed in
  // a platform-dependent register (see below).
#if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#else
# error Unsupported architecture
#endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}
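
// Two worked decodings of the 0xXY encoding above, using the AArch64 BRK
// scheme (the immediates are hypothetical but follow the rules stated in the
// comments):
//
//   // brk #0x912: code = 0x912, X = 1, Y = 2
//   //   is_store = 0x912 & 0x10 -> true (store)
//   //   recover  = 0x912 & 0x20 -> false (fatal)
//   //   size     = 1 << 2       -> 4-byte access, address in X0
//
//   // brk #0x92f: code = 0x92f, X = 2, Y = 0xf
//   //   is_store = 0x92f & 0x10 -> false (load)
//   //   recover  = 0x92f & 0x20 -> true (recoverable)
//   //   size     = read from X1 (Y == 0xf means register-passed size)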

static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
                              ucontext_t *uc, uptr *registers_frame = nullptr) {
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);

  // The second stack frame contains the failure __hwasan_check function, as
  // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
  // we wish to ignore. This (currently) only occurs on AArch64, as x64
  // implementations use SIGTRAP to implement the failure, and thus do not go
  // through the stack saver.
  if (registers_frame && stack->trace && stack->size > 0) {
    stack->trace++;
    stack->size--;
  }

  bool fatal = flags()->halt_on_error || !ai.recover;
  ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
                    registers_frame);
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#if defined(__aarch64__)
  // Advance the PC past the 4-byte BRK instruction so that execution resumes
  // after the failed check (relevant in recoverable mode).
  uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

} // namespace __hwasan

// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
                            size_t outsize) {
  __hwasan::AccessInfo ai;
  ai.is_store = access_info & 0x10;
  ai.is_load = !ai.is_store;
  ai.recover = access_info & 0x20;
  ai.addr = addr;
  if ((access_info & 0xf) == 0xf)
    ai.size = outsize;
  else
    ai.size = 1 << (access_info & 0xf);

  __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
                              (uptr)__builtin_frame_address(0), nullptr,
                              registers_frame);
  __builtin_unreachable();
}
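
// Worked decodings of access_info, following the same 0xXY rules as
// GetAccessInfo (the values are hypothetical):
//
//   // access_info == 0x32: & 0x10 -> store, & 0x20 -> recoverable,
//   //                      size = 1 << (0x32 & 0xf) = 4 bytes.
//   // access_info == 0x1f: low nibble is 0xf, so the size is taken from
//   //                      outsize, supplied by the assembly caller.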

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD