//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

14 #include "sanitizer_fuchsia.h"
15 #if SANITIZER_FUCHSIA
16
17 #include "sanitizer_common.h"
18 #include "sanitizer_libc.h"
19 #include "sanitizer_mutex.h"
20
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <threads.h>  // for thrd_current
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
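  // A deadline of 0 is already in the past, so this returns immediately and
  // simply yields the CPU.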
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() {
  zx_time_t time;
  zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
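  // Zircon has no PIDs as such; the kernel object ID (koid) of the process
  // serves the same purpose.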
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

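// BlockingMutex is implemented directly on zx_futex_*: the first word of
// opaque_storage_ serves as the futex and holds one of the states below.
// Waiters advertise themselves by storing MtxSleeping before blocking, so
// Unlock knows when a futex wake is needed.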
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
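  // Slow path: mark the mutex as contended and block on the futex until the
  // holder wakes us.  ZX_ERR_BAD_STATE means the futex word no longer held
  // MtxSleeping when we tried to wait, i.e. we raced with an unlock; retry.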
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
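  // A mapping keeps its own reference to the VMO, so the handle can be closed
  // here regardless of whether the map call succeeded.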
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
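  // Note that fixed_addr is unused on Fuchsia: the kernel chooses where to
  // place the new VMAR within the root VMAR.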
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(
          _zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, uptr range_size, const char *name,
                             bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // The requested window must lie entirely within the reserved range.
  DCHECK_GE(range_size, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
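    // If the reservation did not come back aligned, remap the aligned middle
    // from offset 0 of the VMO with ZX_VM_SPECIFIC_OVERWRITE, then trim the
    // unaligned head and tail below.  The offset passed to zx_vmar_map is
    // relative to the base of the VMAR, hence the ZX_INFO_VMAR query.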
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
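  // Writing the candidate range into a scratch VMO makes the kernel perform
  // the copy, so an unmapped or unreadable source yields an error status here
  // rather than faulting this process.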
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

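// Sanitizer runtimes on Fuchsia have no filesystem access; "files" such as
// options files are expected to be delivered as VMOs via
// __sanitizer_get_configuration and are mapped read-only here.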
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
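  // __sanitizer_log_write is line-oriented, so accumulate output in a
  // per-thread buffer and flush only complete lines.  A single line longer
  // than the buffer is flushed in `size`-byte chunks.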
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  // internal_strncpy does not guarantee termination when argv0 fills the
  // buffer, so terminate explicitly before measuring.
  buf[buf_len - 1] = '\0';
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA