//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition, in
// which case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition,
// in which case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of two.
// Dies on all errors except out-of-memory, in which case it returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory region.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
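// A minimal usage sketch for the mapping helpers above (the size and the
// "MyTool*" tags are illustrative only, not an actual tool):
//   void *buf = MmapOrDie(1 << 20, "MyToolBuffer");
//   ...
//   UnmapOrDie(buf, 1 << 20);
// MmapOrDieOnFatalError is identical except that it may return nullptr:
//   if (void *cache = MmapOrDieOnFatalError(1 << 20, "MyToolCache")) {
//     ...
//     UnmapOrDie(cache, 1 << 20);
//   }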

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) bytes on the left,
// and shadow_size_bytes bytes on the right, which on Linux are mapped
// no-access. high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address.  Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};
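// Illustrative sketch of ReservedAddressRange use (the size, tags, and the
// single-page Map call are hypothetical):
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 20, "MyToolRegion");
//   uptr page = range.Map(base, GetPageSizeCached(), "MyToolPage");
//   ...
//   range.Unmap(page, GetPageSizeCached());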

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);
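// Hedged sketch of a fill_profile_f callback; the stats layout is entirely
// tool-defined, and the single-slot RSS accumulation below is only an example:
//   static void FillProfileExample(uptr start, uptr rss, bool file,
//                                  /*out*/ uptr *stats) {
//     stats[0] += rss;
//   }
//   ...
//   uptr stats[1] = {};
//   GetMemoryProfile(FillProfileExample, stats);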

// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
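// Usage sketch, assuming the caller provides its own lock (the mutex instance
// and the MyNode type below are hypothetical):
//   static LowLevelAllocator g_alloc;  // Linker-initialized, no constructor.
//   static StaticSpinMutex g_alloc_mutex;
//   ...
//   SpinMutexLock l(&g_alloc_mutex);
//   MyNode *node = new (g_alloc) MyNode();  // Uses the operator new overload
//                                           // declared at the end of this file.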

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
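// For example, a tool might only log details at higher verbosity levels (the
// messages, the level values, and n_modules below are purely illustrative):
//   VReport(1, "%s: using existing mapping\n", SanitizerToolName);
//   VPrintf(2, "scanned %zu modules\n", n_modules);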

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
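// Typical use in an error-reporting path (sketch; the report body is up to the
// tool):
//   {
//     ScopedErrorReportLock report_lock;
//     // Print the report here; reports from other threads are serialized.
//   }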

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
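// A few worked examples of the helpers above:
//   RoundUpTo(17, 16) == 32, RoundDownTo(17, 16) == 16,
//   IsAligned(32, 16) == true, RoundUpToPowerOfTwo(17) == 32, Log2(32) == 5.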

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
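// Usage sketch:
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   for (uptr a : addrs) { /* ... */ }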

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
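// Usage sketch (the message is illustrative):
//   InternalScopedString str;
//   str.append("expected %d, got %d\n", 1, 2);
//   Printf("%s", str.data());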

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
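// For example, sorting an InternalMmapVector<uptr> v in descending order with
// a custom comparator (sketch):
//   Sort(v.data(), v.size(),
//        [](const uptr &a, const uptr &b) { return a > b; });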

// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
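// For example, given a sorted InternalMmapVector<uptr> v, the index of the
// first element that is >= 42 (sketch):
//   uptr idx = InternalLowerBound(v, (uptr)42);
//   bool found = idx < v.size() && v[idx] == 42;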

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
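// E.g. a vector holding {3, 1, 3, 2} holds {1, 2, 3} after SortAndDedup.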

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may re-read
// the file multiple times to avoid mmap during the read attempts. It is used
// to read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
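// Usage sketch (the path is illustrative; note the buffer is mmaped and should
// be released with UnmapOrDie):
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // Use buff[0 .. read_len).
//     UnmapOrDie(buff, buff_size);
//   }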

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
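// Usage sketch (the verbosity level and message are illustrative):
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &mod : modules)
//     VPrintf(2, "module %s loaded at 0x%zx\n", mod.full_name(),
//             mod.base_address());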

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;
  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will then be zero. This field allows distinguishing between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashed the whole machine if a
// process used virtual memory over 4TB (as many sanitizers like to do).
// This function will abort the process if running on a kernel that looks
// vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release memory
// to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H