//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;
const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) bytes on the left,
// and shadow_size_bytes bytes on the right, which on Linux is mapped with no
// access. high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address.  Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
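// A sketch of typical use (kShadowSize is a placeholder, not part of this
// interface): reserve a large range up front, then commit pieces lazily.
//   ReservedAddressRange range;
//   uptr base = range.Init(kShadowSize, "shadow");  // reserve, inaccessible
//   range.MapOrDie(base, GetPageSizeCached());      // commit the first page
//   range.Unmap(base, GetPageSizeCached());         // and return it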

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
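// A sketch of intended use (Foo and its guarding lock are hypothetical): the
// placement operator new declared at the bottom of this header routes
// allocations through a linker-initialized instance.
//   static LowLevelAllocator allocator;  // zero-initialized by the linker
//   // ... while holding the external lock that guards `allocator` ...
//   Foo *foo = new (allocator) Foo();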

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
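// Usage sketch: the message is emitted only when the runtime verbosity (set
// via SetVerbosity, typically from the `verbosity` flag) is at least `level`.
// The `size` argument below is a placeholder.
//   VReport(1, "Initialization done.\n");
//   VPrintf(2, "mapped %zd bytes\n", size);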

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }

  static void Lock() ACQUIRE(mutex_);
  static void Unlock() RELEASE(mutex_);
  static void CheckLocked() CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
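// Worked examples for the helpers above (boundaries and alignments must be
// powers of two):
//   MostSignificantSetBitIndex(0x90) == 7
//   LeastSignificantSetBitIndex(0x90) == 4
//   RoundUpToPowerOfTwo(17) == 32
//   RoundUpTo(17, 16) == 32, RoundDownTo(17, 16) == 16
//   IsAligned(48, 16) == true, Log2(4096) == 12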

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
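// Usage sketch: a growable array backed directly by mmap, safe to use before
// the main allocator is up. Memory is returned to the OS in the destructor.
//   InternalMmapVector<uptr> pcs;
//   pcs.push_back(0x4005d0);
//   for (uptr pc : pcs) Printf("%p\n", (void *)pc);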

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
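// Usage sketch: accumulate a report incrementally, then print it in one shot
// (`leaked_bytes` below is a placeholder).
//   InternalScopedString str;
//   str.append("SUMMARY: %s: ", SanitizerToolName);
//   str.append("%zd bytes leaked\n", leaked_bytes);
//   Printf("%s", str.data());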

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
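// Usage sketch: ascending order comes from the default CompareLess; any
// callable taking two const references works as the comparator, e.g. for a
// descending sort (`v` is a placeholder vector):
//   Sort(v.data(), v.size());                                       // ascending
//   Sort(v.data(), v.size(), [](uptr a, uptr b) { return a > b; });  // descending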

// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v,
                        const typename Container::value_type &val,
                        Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
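// Usage sketch (`v` must already be sorted by `comp`; `val` is a placeholder):
// the returned index is where `val` is, or where it would be inserted.
//   uptr idx = InternalLowerBound(v, val);
//   bool found = idx < v.size() && v[idx] == val;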

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchRISCV64
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid using mmap during the read attempts. It's
// used to read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchRISCV64:
      return "riscv64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
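// Usage sketch (`p` and `size` are placeholders): keep a hand-written clearing
// loop from being pattern-matched into a libc memset call, which the sanitizer
// itself may intercept.
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(p);  // opaque use defeats idiom recognition
//   }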

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field makes it possible to distinguish between
  // these cases and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

#define PRINTF_128(v)                                                         \
  (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
      (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)),                \
      (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)),                \
      (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)),             \
      (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
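// Usage sketch: expands a 16-byte value `v` into sixteen u8 arguments, printed
// in memory order (so little-endian on most supported targets):
//   Printf("0x%02x%02x%02x%02x%02x%02x%02x%02x"
//          "%02x%02x%02x%02x%02x%02x%02x%02x\n", PRINTF_128(v));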

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H