//===-- sanitizer_posix.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_posix.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>

#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented.  So just define it to zero.
#undef  MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

namespace __sanitizer {

// ------------- sanitizer_common.h
usize GetMmapGranularity() {
  return GetPageSize();
}

void *MmapOrDie(usize size, const char *mem_type, bool raw_report) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
  IncreaseTotalMmap(size);
  return (void *)res;
}

void UnmapOrDie(void *addr, usize size) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  if (UNLIKELY(internal_iserror(res))) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }
  DecreaseTotalMmap(size);
}

void *MmapOrDieOnFatalError(usize size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno))) {
    if (reserrno == ENOMEM)
      return nullptr;
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)res;
}

// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
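// For example, with size == alignment == 0x10000 we map 0x20000 bytes; if the
// kernel returns 0x12345000, the aligned result is 0x12350000, so the 0xb000
// bytes in front of it and the 0x5000 bytes past res + size get unmapped.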
void *MmapAlignedOrDieOnFatalError(usize size, usize alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));
  usize map_size = size + alignment;
  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
  if (UNLIKELY(!map_res))
    return nullptr;
  uptr map_end = map_res + map_size;
  uptr res = map_res;
  if (!IsAligned(res, alignment)) {
    res = RoundUpTo(map_res, alignment);
    // FIXME: this should not do a csetaddr
    UnmapOrDie((void *)map_res, (char *)res - (char *)map_res);
  }
  uptr end = res + size;
  if (end != map_end)
    UnmapOrDie((void *)end, (char *)map_end - (char *)end);
  return (void*)res;
}

void *MmapNoReserveOrDie(usize size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
  IncreaseTotalMmap(size);
  return (void *)p;
}
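// Shared helper for the MmapFixed* wrappers below: rounds the request to page
// granularity, maps at the (page-aligned) fixed address, and either returns
// nullptr (when tolerate_enomem is set and mmap reports ENOMEM) or dies with
// an mmap failure report.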
static void *MmapFixedImpl(uptr fixed_addr, usize size, bool tolerate_enomem,
                           const char *name) {
  size = RoundUpTo(size, GetPageSizeCached());
  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
  uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno))) {
    if (tolerate_enomem && reserrno == ENOMEM)
      return nullptr;
    char mem_type[40];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapFixedOrDie(uptr fixed_addr, usize size, const char *name) {
  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, usize size,
                                 const char *name) {
  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
}

bool MprotectNoAccess(uptr addr, usize size) {
  return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}

bool MprotectReadOnly(uptr addr, usize size) {
  return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}

#if !SANITIZER_MAC
void MprotectMallocZones(void *addr, int prot) {}
#endif

fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
  if (ShouldMockFailureToOpen(filename))
    return kInvalidFd;
  int flags;
  switch (mode) {
    case RdOnly: flags = O_RDONLY; break;
    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
    case RdWr: flags = O_RDWR | O_CREAT; break;
  }
  fd_t res = internal_open(filename, flags, 0660);
  if (internal_iserror(res, errno_p))
    return kInvalidFd;
  return ReserveStandardFds(res);
}

void CloseFile(fd_t fd) {
  internal_close(fd);
}

bool ReadFromFile(fd_t fd, void *buff, usize buff_size, usize *bytes_read,
                  error_t *error_p) {
  usize res = internal_read(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_read)
    *bytes_read = res;
  return true;
}

bool WriteToFile(fd_t fd, const void *buff, usize buff_size,
                 usize *bytes_written, error_t *error_p) {
  usize res = internal_write(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_written)
    *bytes_written = res;
  return true;
}

void *MapFileToMemory(const char *file_name, usize *buff_size) {
  fd_t fd = OpenFile(file_name, RdOnly);
  CHECK(fd != kInvalidFd);
  usize fsize = internal_filesize(fd);
  CHECK_NE(fsize, (usize)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? nullptr : (void *)map;
}

void *MapWritableFileToMemory(void *addr, usize size, fd_t fd, OFF_T offset) {
  usize flags = MAP_SHARED;
  if (addr) flags |= MAP_FIXED;
  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
  int mmap_errno = 0;
  if (internal_iserror(p, &mmap_errno)) {
    Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
           fd, (long long)offset, size, p, mmap_errno);
    return nullptr;
  }
  return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
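// Note that IntervalsAreSeparate() compares closed intervals, which is why the
// exclusive segment.end is passed below as segment.end - 1.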
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  if (proc_maps.Error())
    return true; // and hope for the best
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (segment.start == segment.end) continue;  // Empty range.
    CHECK_NE(0, segment.end);
    if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
                              range_end))
      return false;
  }
  return true;
}

void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __func__);
  MemoryMappedSegment segment(filename, kBufSize);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&segment)) {
    Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
           segment.filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}

const char *GetPwd() {
  return GetEnv("PWD");
}

bool IsPathSeparator(const char c) {
  return c == '/';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsPathSeparator(path[0]);
}

void ReportFile::Write(const char *buffer, usize length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  internal_write(fd, buffer, length);
}

bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  InternalScopedString buff(kMaxPathLength);
  MemoryMappedSegment segment(buff.data(), kMaxPathLength);
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable() &&
        internal_strcmp(module, segment.filename) == 0) {
      *start = segment.start;
      *end = segment.end;
      return true;
    }
  }
  return false;
}

uptr SignalContext::GetAddress() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return (uptr)si->si_addr;
}

bool SignalContext::IsMemoryAccess() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return si->si_signo == SIGSEGV;
}

int SignalContext::GetType() const {
  return static_cast<const siginfo_t *>(siginfo)->si_signo;
}

const char *SignalContext::Describe() const {
  switch (GetType()) {
    case SIGFPE:
      return "FPE";
    case SIGILL:
      return "ILL";
    case SIGABRT:
      return "ABRT";
    case SIGSEGV:
      return "SEGV";
    case SIGBUS:
      return "BUS";
    case SIGTRAP:
      return "TRAP";
  }
  return "UNKNOWN SIGNAL";
}
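// Moves fd out of the [0, 2] range so it cannot alias a standard descriptor:
// dup() is called repeatedly until it returns a descriptor above 2, and the
// low descriptors claimed along the way (including the original fd) are
// closed again.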
fd_t ReserveStandardFds(fd_t fd) {
  CHECK_GE(fd, 0);
  if (fd > 2)
    return fd;
  bool used[3];
  internal_memset(used, 0, sizeof(used));
  while (fd <= 2) {
    used[fd] = true;
    fd = internal_dup(fd);
  }
  for (int i = 0; i <= 2; ++i)
    if (used[i])
      internal_close(i);
  return fd;
}

bool ShouldMockFailureToOpen(const char *path) {
  return common_flags()->test_only_emulate_no_memorymap &&
         internal_strncmp(path, "/proc/", 6) == 0;
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
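// Backs a to-be-decorated mapping with a file under /dev/shm so that the given
// name becomes visible in /proc/self/maps. The file is sized with ftruncate,
// immediately unlinked (the open descriptor keeps it alive), and MAP_ANON is
// cleared from *flags so the caller maps this fd instead of anonymous memory.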
int GetNamedMappingFd(const char *name, usize size, int *flags) {
  if (!common_flags()->decorate_proc_maps || !name)
    return -1;
  char shmname[200];
  CHECK(internal_strlen(name) < sizeof(shmname) - 10);
  internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
                    internal_getpid(), name);
  int o_cloexec = 0;
#if defined(O_CLOEXEC)
  o_cloexec = O_CLOEXEC;
#endif
  int fd = ReserveStandardFds(
      internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
  CHECK_GE(fd, 0);
  if (!o_cloexec) {
    int res = fcntl(fd, F_SETFD, FD_CLOEXEC);
    CHECK_EQ(0, res);
  }
  int res = internal_ftruncate(fd, size);
  CHECK_EQ(0, res);
  res = internal_unlink(shmname);
  CHECK_EQ(0, res);
  *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
  return fd;
}
#else
int GetNamedMappingFd(const char *name, usize size, int *flags) {
  return -1;
}
#endif

#if SANITIZER_ANDROID
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
void DecorateMapping(uptr addr, usize size, const char *name) {
  if (!common_flags()->decorate_proc_maps || !name)
    return;
  internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
}
#else
void DecorateMapping(uptr addr, usize size, const char *name) {
}
#endif
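// mmap() wrapper that, when decorate_proc_maps is set, attaches a readable
// name to the mapping: on Linux by mapping the /dev/shm-backed fd returned by
// GetNamedMappingFd(), on Android via prctl(PR_SET_VMA_ANON_NAME). Elsewhere
// the name is ignored and this degenerates to a plain internal_mmap() call.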
uptr MmapNamed(void *addr, usize length, int prot, int flags,
               const char *name) {
  int fd = GetNamedMappingFd(name, length, &flags);
  uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
  if (!internal_iserror(res))
    DecorateMapping(res, length, name);
  return res;
}

}  // namespace __sanitizer

#endif  // SANITIZER_POSIX