1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <zircon/process.h>
6 #include <zircon/syscalls.h>
7 #include <zircon/threads.h>
8 
9 #include "src/base/macros.h"
10 #include "src/base/platform/platform-posix-time.h"
11 #include "src/base/platform/platform-posix.h"
12 #include "src/base/platform/platform.h"
13 
14 namespace v8 {
15 namespace base {
16 
17 namespace {
18 
GetProtectionFromMemoryPermission(OS::MemoryPermission access)19 uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
20   switch (access) {
21     case OS::MemoryPermission::kNoAccess:
22     case OS::MemoryPermission::kNoAccessWillJitLater:
23       return 0;  // no permissions
24     case OS::MemoryPermission::kRead:
25       return ZX_VM_PERM_READ;
26     case OS::MemoryPermission::kReadWrite:
27       return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
28     case OS::MemoryPermission::kReadWriteExecute:
29       return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
30     case OS::MemoryPermission::kReadExecute:
31       return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
32   }
33   UNREACHABLE();
34 }
35 
36 }  // namespace
37 
CreateTimezoneCache()38 TimezoneCache* OS::CreateTimezoneCache() {
39   return new PosixDefaultTimezoneCache();
40 }
41 
42 // static
Allocate(void * address,size_t size,size_t alignment,OS::MemoryPermission access)43 void* OS::Allocate(void* address, size_t size, size_t alignment,
44                    OS::MemoryPermission access) {
45   size_t page_size = OS::AllocatePageSize();
46   DCHECK_EQ(0, size % page_size);
47   DCHECK_EQ(0, alignment % page_size);
48   address = AlignedAddress(address, alignment);
49   // Add the maximum misalignment so we are guaranteed an aligned base address.
50   size_t request_size = size + (alignment - page_size);
51 
52   zx_handle_t vmo;
53   if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
54     return nullptr;
55   }
56   static const char kVirtualMemoryName[] = "v8-virtualmem";
57   zx_object_set_property(vmo, ZX_PROP_NAME, kVirtualMemoryName,
58                          strlen(kVirtualMemoryName));
59 
60   // Always call zx_vmo_replace_as_executable() in case the memory will need
61   // to be marked as executable in the future.
62   // TOOD(https://crbug.com/v8/8899): Only call this when we know that the
63   // region will need to be marked as executable in the future.
64   if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
65     return nullptr;
66   }
67 
68   uintptr_t reservation;
69   uint32_t prot = GetProtectionFromMemoryPermission(access);
70   zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
71                                    request_size, &reservation);
72   // Either the vmo is now referenced by the vmar, or we failed and are bailing,
73   // so close the vmo either way.
74   zx_handle_close(vmo);
75   if (status != ZX_OK) {
76     return nullptr;
77   }
78 
79   uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
80   uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
81       RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
82 
83   // Unmap extra memory reserved before and after the desired block.
84   if (aligned_base != base) {
85     DCHECK_LT(base, aligned_base);
86     size_t prefix_size = static_cast<size_t>(aligned_base - base);
87     zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(base),
88                   prefix_size);
89     request_size -= prefix_size;
90   }
91 
92   size_t aligned_size = RoundUp(size, page_size);
93 
94   if (aligned_size != request_size) {
95     DCHECK_LT(aligned_size, request_size);
96     size_t suffix_size = request_size - aligned_size;
97     zx_vmar_unmap(zx_vmar_root_self(),
98                   reinterpret_cast<uintptr_t>(aligned_base + aligned_size),
99                   suffix_size);
100     request_size -= suffix_size;
101   }
102 
103   DCHECK(aligned_size == request_size);
104   return static_cast<void*>(aligned_base);
105 }
106 
107 // static
Free(void * address,const size_t size)108 bool OS::Free(void* address, const size_t size) {
109   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
110   DCHECK_EQ(0, size % AllocatePageSize());
111   return zx_vmar_unmap(zx_vmar_root_self(),
112                        reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
113 }
114 
115 // static
Release(void * address,size_t size)116 bool OS::Release(void* address, size_t size) {
117   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
118   DCHECK_EQ(0, size % CommitPageSize());
119   return zx_vmar_unmap(zx_vmar_root_self(),
120                        reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
121 }
122 
123 // static
SetPermissions(void * address,size_t size,MemoryPermission access)124 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
125   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
126   DCHECK_EQ(0, size % CommitPageSize());
127   uint32_t prot = GetProtectionFromMemoryPermission(access);
128   return zx_vmar_protect(zx_vmar_root_self(), prot,
129                          reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
130 }
131 
// static
// No-op on Fuchsia: pages are never proactively returned to the system, and
// reporting success keeps callers on their common path.
bool OS::DiscardSystemPages(void* address, size_t size) {
  // TODO(hpayer): Does Fuchsia have madvise?
  return true;
}
137 
// static
// Reports whether the OS commits pages lazily on first touch; answering false
// here makes callers commit memory explicitly.
bool OS::HasLazyCommits() {
  // TODO(scottmg): Port, https://crbug.com/731217.
  return false;
}
143 
// Not implemented on Fuchsia; any call aborts via UNREACHABLE().
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}
147 
// Not implemented on Fuchsia; any call aborts via UNREACHABLE().
void OS::SignalCodeMovingGC() {
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}
151 
GetUserTime(uint32_t * secs,uint32_t * usecs)152 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
153   const auto kNanosPerMicrosecond = 1000ULL;
154   const auto kMicrosPerSecond = 1000000ULL;
155 
156   zx_info_thread_stats_t info = {};
157   zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
158                                           ZX_INFO_THREAD_STATS, &info,
159                                           sizeof(info), nullptr, nullptr);
160   if (status != ZX_OK) {
161     return -1;
162   }
163 
164   // First convert to microseconds, rounding up.
165   const uint64_t micros_since_thread_started =
166       (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;
167 
168   *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
169   *usecs =
170       static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
171   return 0;
172 }
173 
// Nothing to adjust on Fuchsia.
void OS::AdjustSchedulingParams() {}
175 
176 }  // namespace base
177 }  // namespace v8
178