//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_FUCHSIA

#include "common.h"
#include "mutex.h"
#include "string_utils.h"

#include <lib/sync/mutex.h> // for sync_mutex_t
#include <stdlib.h>         // for getenv()
#include <zircon/compiler.h>
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>

namespace scudo {

uptr getPageSize() { return _zx_system_get_page_size(); }

void NORETURN die() { __builtin_trap(); }

// The MapPlatformData structure passed to map() is zero-initialized, so make
// sure a zeroed handle is consistent with ZX_HANDLE_INVALID.
static_assert(ZX_HANDLE_INVALID == 0, "");

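// Reserves a range of address space by allocating a sub-vmar off the root
// vmar, recording its handle and base address in Data. Returns nullptr on
// ZX_ERR_NO_MEMORY when AllowNoMem is set, and dies on any other failure.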
static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
  // Only scenario so far.
  DCHECK(Data);
  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);

  const zx_status_t Status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      Size, &Data->Vmar, &Data->VmarBase);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
    return nullptr;
  }
  return reinterpret_cast<void *>(Data->VmarBase);
}

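// Maps Size bytes. Three scenarios are handled:
// - MAP_NOACCESS: only reserve address space by allocating a vmar;
// - Data already tracks a Vmo: grow it and map the added pages (a resize);
// - otherwise: create a new resizable vmo, name it, and map it.
// A typical reserve-then-commit sequence might look as follows (illustrative
// sketch only; the name and sizes are hypothetical):
//   MapPlatformData Data = {};
//   void *Base = map(nullptr, Capacity, "scudo:region", MAP_NOACCESS, &Data);
//   map(Base, PageSize, "scudo:region", MAP_RESIZABLE, &Data);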
void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          MapPlatformData *Data) {
  DCHECK_EQ(Size % getPageSizeCached(), 0);
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);

  // For MAP_NOACCESS, just allocate a Vmar and return.
  if (Flags & MAP_NOACCESS)
    return allocateVmar(Size, Data, AllowNoMem);

  const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                               ? Data->Vmar
                               : _zx_vmar_root_self();

  zx_status_t Status;
  zx_handle_t Vmo;
  uint64_t VmoSize = 0;
  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
    // If a Vmo was specified, it's a resize operation.
    CHECK(Addr);
    DCHECK(Flags & MAP_RESIZABLE);
    Vmo = Data->Vmo;
    VmoSize = Data->VmoSize;
    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
    if (Status != ZX_OK) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
      return nullptr;
    }
  } else {
    // Otherwise, create a Vmo and set its name.
    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
    if (UNLIKELY(Status != ZX_OK)) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
      return nullptr;
    }
    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
  }

  uintptr_t P;
  zx_vm_option_t MapFlags =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
  if (Addr)
    DCHECK(Data);
  const uint64_t Offset =
      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
  if (Offset)
    MapFlags |= ZX_VM_SPECIFIC;
  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
    return nullptr;
  }

  if (Flags & MAP_PRECOMMIT) {
    Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
                               /*buffer=*/nullptr, /*buffer_size=*/0);
  }

  // If we don't intend to resize the Vmo, there is no need to track it:
  // close it.
  if (Flags & MAP_RESIZABLE) {
    DCHECK(Data);
    if (Data->Vmo == ZX_HANDLE_INVALID)
      Data->Vmo = Vmo;
    else
      DCHECK_EQ(Data->Vmo, Vmo);
  } else {
    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
  }
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
    return nullptr;
  }

  if (Data)
    Data->VmoSize += Size;

  return reinterpret_cast<void *>(P);
}

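// Unmaps [Addr, Addr + Size). With UNMAP_ALL, the tracked vmar is destroyed
// instead, which tears down every mapping inside it. Any tracked Vmo handle
// is closed, and Data is reset.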
void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
  if (Flags & UNMAP_ALL) {
    DCHECK_NE(Data, nullptr);
    const zx_handle_t Vmar = Data->Vmar;
    DCHECK_NE(Vmar, _zx_vmar_root_self());
    // Destroying the vmar effectively unmaps the whole mapping.
    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
  } else {
    const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                                 ? Data->Vmar
                                 : _zx_vmar_root_self();
    const zx_status_t Status =
        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
    if (UNLIKELY(Status != ZX_OK))
      dieOnMapUnmapError();
  }
  if (Data) {
    if (Data->Vmo != ZX_HANDLE_INVALID)
      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
    memset(Data, 0, sizeof(*Data));
  }
}

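// Changes the protection of a range inside the tracked vmar: no permissions
// for MAP_NOACCESS, read-write otherwise.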
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
                         MapPlatformData *Data) {
  const zx_vm_option_t Prot =
      (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  if (_zx_vmar_protect(Data->Vmar, Prot, Addr, Size) != ZX_OK)
    dieOnMapUnmapError();
}

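// Decommits the given range of the tracked Vmo, returning its backing pages
// to the kernel. The range stays mapped and refaults as zero-filled.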
void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data) {
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
  const zx_status_t Status =
      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
  CHECK_EQ(Status, ZX_OK);
}

const char *getEnv(const char *Name) { return getenv(Name); }

// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
// because the Fuchsia implementation of sync_mutex_t has clang thread safety
// annotations. Were we to apply proper capability annotations to the top
// level HybridMutex class itself, they would not be needed. As it stands, the
// thread safety analysis thinks that we are locking the mutex and
// accidentally leaving it locked on the way out.
bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  // Size and alignment must be compatible between both types.
  return sync_mutex_trylock(&M) == ZX_OK;
}

void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_lock(&M);
}

void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_unlock(&M);
}

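// The monotonic clock is expressed in nanoseconds since boot.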
u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }

u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }

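// A real thread identifier is not needed on this platform; return a constant.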
u32 getThreadID() { return 0; }

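// Fills Buffer with Length bytes from Zircon's CPRNG. Requests are capped at
// MaxRandomLength, which the static_assert checks against the kernel's
// per-draw limit.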
bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
  static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
    return false;
  _zx_cprng_draw(Buffer, Length);
  return true;
}

void outputRaw(const char *Buffer) {
  __sanitizer_log_write(Buffer, strlen(Buffer));
}

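// Intentionally a no-op: there is no abort-message sink to forward to here.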
void setAbortMessage(const char *Message) {}

} // namespace scudo

#endif // SCUDO_FUCHSIA