1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8
9 #include <errno.h>
10 #include <limits.h>
11 #include <pthread.h>
12 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 #include <pthread_np.h> // for pthread_set_name_np
14 #endif
15 #include <sched.h> // for sched_yield
16 #include <stdio.h>
17 #include <time.h>
18 #include <unistd.h>
19
20 #include <sys/mman.h>
21 #include <sys/stat.h>
22 #include <sys/time.h>
23 #include <sys/types.h>
24 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
25 defined(__NetBSD__) || defined(__OpenBSD__)
26 #include <sys/sysctl.h> // NOLINT, for sysctl
27 #endif
28
29 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30 #define LOG_TAG "v8"
31 #include <android/log.h> // NOLINT
32 #endif
33
34 #include <cmath>
35 #include <cstdlib>
36
37 #include "src/base/platform/platform-posix.h"
38
39 #include "src/base/lazy-instance.h"
40 #include "src/base/macros.h"
41 #include "src/base/platform/platform.h"
42 #include "src/base/platform/time.h"
43 #include "src/base/utils/random-number-generator.h"
44
45 #ifdef V8_FAST_TLS_SUPPORTED
46 #include <atomic>
47 #endif
48
49 #if V8_OS_MACOSX
50 #include <dlfcn.h>
51 #include <mach/mach.h>
52 #endif
53
54 #if V8_OS_LINUX
55 #include <sys/prctl.h> // NOLINT, for prctl
56 #endif
57
58 #if defined(V8_OS_FUCHSIA)
59 #include <zircon/process.h>
60 #else
61 #include <sys/resource.h>
62 #endif
63
64 #if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
65 #include <sys/syscall.h>
66 #endif
67
68 #if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS || V8_OS_DRAGONFLYBSD
69 #define MAP_ANONYMOUS MAP_ANON
70 #endif
71
72 #if defined(V8_OS_SOLARIS)
73 #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
74 extern "C" int madvise(caddr_t, size_t, int);
75 #else
76 extern int madvise(caddr_t, size_t, int);
77 #endif
78 #endif
79
80 #ifndef MADV_FREE
81 #define MADV_FREE MADV_DONTNEED
82 #endif
83
84 #if defined(V8_LIBC_GLIBC)
85 extern "C" void* __libc_stack_end; // NOLINT
86 #endif
87
88 namespace v8 {
89 namespace base {
90
91 namespace {
92
// 0 is never a valid thread id.
const pthread_t kNoThread = static_cast<pthread_t>(0);

// When true, OS::Abort() traps immediately instead of calling abort().
// Configured once at startup via OS::Initialize().
bool g_hard_abort = false;

// Filename reported for the GC's fake mmap region; see GetGCFakeMMapFile().
const char* g_gc_fake_mmap = nullptr;

DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
                                GetPlatformRandomNumberGenerator)
// Serializes access to the process-wide random number generator above.
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;

#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else  // !V8_OS_MACOSX
// No backing file: plain anonymous mapping.
const int kMmapFd = -1;
#endif  // !V8_OS_MACOSX

// Anonymous mappings always use file offset 0.
const int kMmapFdOffset = 0;
115
116 // TODO(v8:10026): Add the right permission flag to make executable pages
117 // guarded.
// Translates V8's MemoryPermission enum into the corresponding POSIX PROT_*
// bits for mmap()/mprotect(). The switch deliberately has no default case so
// the compiler warns if a new enum value is added without handling it here.
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
      return PROT_NONE;
    case OS::MemoryPermission::kRead:
      return PROT_READ;
    case OS::MemoryPermission::kReadWrite:
      return PROT_READ | PROT_WRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    case OS::MemoryPermission::kReadExecute:
      return PROT_READ | PROT_EXEC;
  }
  UNREACHABLE();
}
133
GetFlagsForMemoryPermission(OS::MemoryPermission access)134 int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
135 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
136 if (access == OS::MemoryPermission::kNoAccess) {
137 #if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX && !V8_OS_DRAGONFLYBSD
138 flags |= MAP_NORESERVE;
139 #endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
140 #if V8_OS_QNX
141 flags |= MAP_LAZY;
142 #endif // V8_OS_QNX
143 }
144 return flags;
145 }
146
Allocate(void * hint,size_t size,OS::MemoryPermission access)147 void* Allocate(void* hint, size_t size, OS::MemoryPermission access) {
148 int prot = GetProtectionFromMemoryPermission(access);
149 int flags = GetFlagsForMemoryPermission(access);
150 void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
151 if (result == MAP_FAILED) return nullptr;
152 return result;
153 }
154
155 #endif // !V8_OS_FUCHSIA
156
157 } // namespace
158
159 #if V8_OS_LINUX || V8_OS_FREEBSD
160 #ifdef __arm__
161
// Returns true when the build targets the ARM hard-float calling convention
// (VFP registers used for FP arguments). Decided entirely at compile time
// from compiler-provided macros.
bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI is used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION \
  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500 && !defined(__clang__)
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
#error \
    "Your version of compiler does not report the FP ABI compiled for." \
    "Please report it on this issue" \
    "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}
199
200 #endif // def __arm__
201 #endif
202
Initialize(bool hard_abort,const char * const gc_fake_mmap)203 void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
204 g_hard_abort = hard_abort;
205 g_gc_fake_mmap = gc_fake_mmap;
206 }
207
// Required stack-frame alignment (in bytes) for the target architecture,
// chosen at compile time.
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#elif V8_TARGET_ARCH_S390
  return 8;
#else
  // Otherwise we just assume 16 byte alignment, i.e.:
  // - With gcc 4.4 the tree vectorization optimizer can generate code
  //   that requires 16 byte alignment such as movdqa on x86.
  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
  //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
  return 16;
#endif
}
226
227 // static
AllocatePageSize()228 size_t OS::AllocatePageSize() {
229 return static_cast<size_t>(sysconf(_SC_PAGESIZE));
230 }
231
232 // static
CommitPageSize()233 size_t OS::CommitPageSize() {
234 static size_t page_size = getpagesize();
235 return page_size;
236 }
237
238 // static
SetRandomMmapSeed(int64_t seed)239 void OS::SetRandomMmapSeed(int64_t seed) {
240 if (seed) {
241 MutexGuard guard(rng_mutex.Pointer());
242 GetPlatformRandomNumberGenerator()->SetSeed(seed);
243 }
244 }
245
246 // static
// static
// Produces a randomized hint address for mmap, masked and offset per
// platform so that it lands in a usable part of the address space. The
// result is only a hint; mmap may place the mapping elsewhere.
void* OS::GetRandomMmapAddr() {
  uintptr_t raw_addr;
  {
    MutexGuard guard(rng_mutex.Pointer());
    GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
  }
#if defined(__APPLE__)
#if V8_TARGET_ARCH_ARM64
  // Apple Silicon uses 16 KiB pages; align the hint accordingly.
  DCHECK_EQ(1 << 14, AllocatePageSize());
  raw_addr = RoundDown(raw_addr, 1 << 14);
#endif
#endif
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
#if V8_TARGET_ARCH_X64
  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
  raw_addr &= uint64_t{0x3FFFF000};
  // Use extra address space to isolate the mmap regions.
  raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
  // Big-endian Linux: 42 bits of virtual addressing.
  raw_addr &= uint64_t{0x03FFFFFFF000};
#else
  // Little-endian Linux: 46 bits of virtual addressing.
  raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42
  // bits of virtual addressing.  Truncate to 40 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing.  Truncate to 29 bits to allow kernel
  // chance to fulfill request.
  raw_addr &= 0x1FFFF000;
#elif V8_TARGET_ARCH_MIPS64
  // 42 bits of virtual addressing.  Truncate to 40 bits to allow kernel
  // chance to fulfill request.
  raw_addr &= uint64_t{0xFFFFFF0000};
#else
  raw_addr &= 0x3FFFF000;

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  raw_addr += 0x80000000;
#elif V8_OS_AIX
  // The range 0x30000000 - 0xD0000000 is available on AIX;
  // choose the upper range.
  raw_addr += 0x90000000;
#else
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
#endif
#endif
  return reinterpret_cast<void*>(raw_addr);
}
328
329 // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
330 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
331 // static
// static
// Allocates |size| bytes aligned to |alignment| near |hint|. Over-allocates
// by (alignment - page_size), then trims the unaligned prefix and the
// excess suffix with munmap so exactly |size| bytes remain mapped.
// Returns nullptr on failure.
void* OS::Allocate(void* hint, size_t size, size_t alignment,
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  hint = AlignedAddress(hint, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  size_t request_size = size + (alignment - page_size);
  request_size = RoundUp(request_size, OS::AllocatePageSize());
  void* result = base::Allocate(hint, request_size, access);
  if (result == nullptr) return nullptr;

  // Unmap memory allocated before the aligned base address.
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
  if (aligned_base != base) {
    DCHECK_LT(base, aligned_base);
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    CHECK(Free(base, prefix_size));
    request_size -= prefix_size;
  }
  // Unmap memory allocated after the potentially unaligned end.
  if (size != request_size) {
    DCHECK_LT(size, request_size);
    size_t suffix_size = request_size - size;
    CHECK(Free(aligned_base + size, suffix_size));
    request_size -= suffix_size;
  }

  DCHECK_EQ(size, request_size);
  return static_cast<void*>(aligned_base);
}
365
366 // static
Free(void * address,const size_t size)367 bool OS::Free(void* address, const size_t size) {
368 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
369 DCHECK_EQ(0, size % AllocatePageSize());
370 return munmap(address, size) == 0;
371 }
372
373 // static
Release(void * address,size_t size)374 bool OS::Release(void* address, size_t size) {
375 DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
376 DCHECK_EQ(0, size % CommitPageSize());
377 return munmap(address, size) == 0;
378 }
379
380 // static
// static
// Changes the protection of [address, address+size) via mprotect. When the
// region is made inaccessible, its pages are additionally discarded so the
// OS can reclaim them. Returns false if mprotect failed.
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());

  int prot = GetProtectionFromMemoryPermission(access);
  int ret = mprotect(address, size, prot);
  if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
    // This is advisory; ignore errors and continue execution.
    USE(DiscardSystemPages(address, size));
  }

// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
// changing permissions away from OS::MemoryPermission::kNoAccess. Since this
// state is not kept at this layer, we always call this if access != kNoAccess.
// The cost is a syscall that effectively no-ops.
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(OS_MACOSX)
  if (access != OS::MemoryPermission::kNoAccess)
    madvise(address, size, MADV_FREE_REUSE);
#endif

  return ret == 0;
}
405
// Advises the kernel that the given pages may be reclaimed (madvise). The
// pages stay mapped; their contents may be zeroed lazily. Returns true on
// success or when madvise is simply unavailable on this system.
bool OS::DiscardSystemPages(void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
#if defined(OS_MACOSX)
  // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
  // marks the pages with the reusable bit, which allows both Activity Monitor
  // and memory-infra to correctly track the pages.
  int ret = madvise(address, size, MADV_FREE_REUSABLE);
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
#else
  int ret = madvise(address, size, MADV_FREE);
#endif
  if (ret != 0 && errno == ENOSYS)
    return true;  // madvise is not available on all systems.
  if (ret != 0 && errno == EINVAL) {
// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
// imply runtime support.
#if defined(_AIX) || defined(V8_OS_SOLARIS)
    ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
    ret = madvise(address, size, MADV_DONTNEED);
#endif
  }
  return ret == 0;
}
433
434 // static
// static
// Whether this OS commits pages lazily (physical memory is only consumed on
// first touch). Compile-time per-platform answer.
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD || V8_OS_DRAGONFLYBSD
  return true;
#else
  // TODO(bbudge) Return true for all POSIX platforms.
  return false;
#endif
}
443 #endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
444
GetGCFakeMMapFile()445 const char* OS::GetGCFakeMMapFile() {
446 return g_gc_fake_mmap;
447 }
448
449
Sleep(TimeDelta interval)450 void OS::Sleep(TimeDelta interval) {
451 usleep(static_cast<useconds_t>(interval.InMicroseconds()));
452 }
453
454
Abort()455 void OS::Abort() {
456 if (g_hard_abort) {
457 V8_IMMEDIATE_CRASH();
458 }
459 // Redirect to std abort to signal abnormal program termination.
460 abort();
461 }
462
463
// Executes the host architecture's software-breakpoint instruction so an
// attached debugger stops here.
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
  asm("bkpt 0");
#elif V8_HOST_ARCH_ARM64
  asm("brk 0");
#elif V8_HOST_ARCH_MIPS
  asm("break");
#elif V8_HOST_ARCH_MIPS64
  asm("break");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
  asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
  asm("int $3");
#elif V8_HOST_ARCH_X64
  asm("int $3");
#elif V8_HOST_ARCH_S390
  // Software breakpoint instruction is 0x0001
  asm volatile(".word 0x0001");
#else
#error Unsupported host architecture.
#endif
}
486
487
// Memory-mapped file backed by fopen()/mmap(). Owns both the FILE* and the
// mapping; both are released by the destructor (defined below).
class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
      : file_(file), memory_(memory), size_(size) {}
  ~PosixMemoryMappedFile() final;
  // May return nullptr when the underlying file was empty.
  void* memory() const final { return memory_; }
  size_t size() const final { return size_; }

 private:
  FILE* const file_;
  void* const memory_;
  size_t const size_;
};
501
502
503 // static
// Opens an existing file and maps its full contents. Read-only mode maps
// PROT_READ/MAP_PRIVATE; read-write mode maps PROT_READ|PROT_WRITE with
// MAP_SHARED so writes reach the file. Returns nullptr on any failure
// (missing file, fseek/mmap error); an empty file yields a valid object
// with a null memory pointer.
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
                                                 FileMode mode) {
  const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
  if (FILE* file = fopen(name, fopen_mode)) {
    if (fseek(file, 0, SEEK_END) == 0) {
      // Seek-to-end + ftell determines the file size to map.
      long size = ftell(file);  // NOLINT(runtime/int)
      if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
      if (size > 0) {
        int prot = PROT_READ;
        int flags = MAP_PRIVATE;
        if (mode == FileMode::kReadWrite) {
          prot |= PROT_WRITE;
          flags = MAP_SHARED;
        }
        void* const memory =
            mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
        if (memory != MAP_FAILED) {
          return new PosixMemoryMappedFile(file, memory, size);
        }
      }
    }
    // Any failure path closes the file before reporting nullptr.
    fclose(file);
  }
  return nullptr;
}
529
530 // static
create(const char * name,size_t size,void * initial)531 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
532 size_t size, void* initial) {
533 if (FILE* file = fopen(name, "w+")) {
534 if (size == 0) return new PosixMemoryMappedFile(file, 0, 0);
535 size_t result = fwrite(initial, 1, size, file);
536 if (result == size && !ferror(file)) {
537 void* memory = mmap(OS::GetRandomMmapAddr(), result,
538 PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
539 if (memory != MAP_FAILED) {
540 return new PosixMemoryMappedFile(file, memory, result);
541 }
542 }
543 fclose(file);
544 }
545 return nullptr;
546 }
547
548
~PosixMemoryMappedFile()549 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
550 if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())));
551 fclose(file_);
552 }
553
554
GetCurrentProcessId()555 int OS::GetCurrentProcessId() {
556 return static_cast<int>(getpid());
557 }
558
559
// Returns a numeric id for the calling thread, using whichever native
// facility the platform provides. The value is only meaningful for
// comparison/logging within this process.
int OS::GetCurrentThreadId() {
#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
  return static_cast<int>(syscall(__NR_gettid));
#elif V8_OS_ANDROID
  return static_cast<int>(gettid());
#elif V8_OS_DRAGONFLYBSD || defined(__DragonFly__)
  return static_cast<int>(lwp_gettid());
#elif V8_OS_FREEBSD
  return static_cast<int>(pthread_getthreadid_np());
#elif V8_OS_NETBSD
  return static_cast<int>(_lwp_self());
#elif V8_OS_AIX
  return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
  return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
  return static_cast<int>(pthread_self());
#else
  // Fallback: reinterpret the pthread handle. NOTE(review): pthread_t is
  // opaque, so this is only a best-effort identifier on unknown platforms.
  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
#endif
}
583
ExitProcess(int exit_code)584 void OS::ExitProcess(int exit_code) {
585 // Use _exit instead of exit to avoid races between isolate
586 // threads and static destructors.
587 fflush(stdout);
588 fflush(stderr);
589 _exit(exit_code);
590 }
591
592 // ----------------------------------------------------------------------------
593 // POSIX date/time support.
594 //
595
596 #if !defined(V8_OS_FUCHSIA)
GetUserTime(uint32_t * secs,uint32_t * usecs)597 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
598 struct rusage usage;
599
600 if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
601 *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
602 *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
603 return 0;
604 }
605 #endif
606
TimeCurrentMillis()607 double OS::TimeCurrentMillis() {
608 return Time::Now().ToJsTime();
609 }
610
DaylightSavingsOffset(double time)611 double PosixTimezoneCache::DaylightSavingsOffset(double time) {
612 if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
613 time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
614 struct tm tm;
615 struct tm* t = localtime_r(&tv, &tm);
616 if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
617 return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
618 }
619
620
GetLastError()621 int OS::GetLastError() {
622 return errno;
623 }
624
625
626 // ----------------------------------------------------------------------------
627 // POSIX stdio support.
628 //
629
FOpen(const char * path,const char * mode)630 FILE* OS::FOpen(const char* path, const char* mode) {
631 FILE* file = fopen(path, mode);
632 if (file == nullptr) return nullptr;
633 struct stat file_stat;
634 if (fstat(fileno(file), &file_stat) != 0) {
635 fclose(file);
636 return nullptr;
637 }
638 bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
639 if (is_regular_file) return file;
640 fclose(file);
641 return nullptr;
642 }
643
644
Remove(const char * path)645 bool OS::Remove(const char* path) {
646 return (remove(path) == 0);
647 }
648
DirectorySeparator()649 char OS::DirectorySeparator() { return '/'; }
650
isDirectorySeparator(const char ch)651 bool OS::isDirectorySeparator(const char ch) {
652 return ch == DirectorySeparator();
653 }
654
655
OpenTemporaryFile()656 FILE* OS::OpenTemporaryFile() {
657 return tmpfile();
658 }
659
660
661 const char* const OS::LogFileOpenMode = "w";
662
663
Print(const char * format,...)664 void OS::Print(const char* format, ...) {
665 va_list args;
666 va_start(args, format);
667 VPrint(format, args);
668 va_end(args);
669 }
670
671
// vprintf-style output: goes to the Android system log when built for
// Android (unless redirected to stdout), otherwise to stdout.
void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vprintf(format, args);
#endif
}
679
680
FPrint(FILE * out,const char * format,...)681 void OS::FPrint(FILE* out, const char* format, ...) {
682 va_list args;
683 va_start(args, format);
684 VFPrint(out, format, args);
685 va_end(args);
686 }
687
688
// vfprintf-style output to |out|; on Android builds (without stdout
// redirection) the stream is ignored and the Android system log is used.
void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vfprintf(out, format, args);
#endif
}
696
697
PrintError(const char * format,...)698 void OS::PrintError(const char* format, ...) {
699 va_list args;
700 va_start(args, format);
701 VPrintError(format, args);
702 va_end(args);
703 }
704
705
// vprintf-style error output: Android system log at ERROR priority on
// Android builds (without stdout redirection), stderr otherwise.
void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
  vfprintf(stderr, format, args);
#endif
}
713
714
SNPrintF(char * str,int length,const char * format,...)715 int OS::SNPrintF(char* str, int length, const char* format, ...) {
716 va_list args;
717 va_start(args, format);
718 int result = VSNPrintF(str, length, format, args);
719 va_end(args);
720 return result;
721 }
722
723
VSNPrintF(char * str,int length,const char * format,va_list args)724 int OS::VSNPrintF(char* str,
725 int length,
726 const char* format,
727 va_list args) {
728 int n = vsnprintf(str, length, format, args);
729 if (n < 0 || n >= length) {
730 // If the length is zero, the assignment fails.
731 if (length > 0)
732 str[length - 1] = '\0';
733 return -1;
734 } else {
735 return n;
736 }
737 }
738
739
740 // ----------------------------------------------------------------------------
741 // POSIX string support.
742 //
743
// Copies at most |n| bytes from |src| to |dest| via strncpy.
// NOTE(review): |length| (the destination capacity) is not used here, and
// the result is not NUL-terminated when strlen(src) >= n — callers appear
// to be responsible for termination; confirm before relying on it.
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
  strncpy(dest, src, n);
}
747
748
749 // ----------------------------------------------------------------------------
750 // POSIX thread support.
751 //
752
// POSIX-specific per-thread bookkeeping owned by base::Thread.
class Thread::PlatformData {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
  // Synchronizes thread creation: held while pthread_create runs so the new
  // thread cannot observe a partially initialized handle (see ThreadEntry).
  Mutex thread_creation_mutex_;
};
760
// Constructs a not-yet-started thread with the stack size and name taken
// from |options|.
Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()),
      start_semaphore_(nullptr) {
  // pthread_create rejects stacks smaller than PTHREAD_STACK_MIN, so clamp
  // any positive request up to that floor.
  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
    stack_size_ = PTHREAD_STACK_MIN;
  }
  set_name(options.name());
}
770
771
~Thread()772 Thread::~Thread() {
773 delete data_;
774 }
775
776
// Applies |name| to the calling thread using whichever naming API the
// platform provides; silently does nothing where none exists.
static void SetThreadName(const char* name) {
#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
  pthread_set_name_np(pthread_self(), name);
#elif V8_OS_NETBSD
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
  pthread_setname_np(pthread_self(), "%s", name);
#elif V8_OS_MACOSX
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (dynamic_pthread_setname_np == nullptr) return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
#elif defined(PR_SET_NAME)
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(name),  // NOLINT
        0, 0, 0);
#endif
}
801
802
// pthread entry trampoline: |arg| is the owning base::Thread. Names the OS
// thread, then hands control to Thread::NotifyStartedAndRun().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // We take the lock here to make sure that pthread_create finished first since
  // we don't know which thread will run first (the original thread or the new
  // one).
  { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
  SetThreadName(thread->name());
  // After the handshake above, Thread::Start has published the handle.
  DCHECK_NE(thread->data()->thread_, kNoThread);
  thread->NotifyStartedAndRun();
  return nullptr;
}
814
815
set_name(const char * name)816 void Thread::set_name(const char* name) {
817 strncpy(name_, name, sizeof(name_) - 1);
818 name_[sizeof(name_) - 1] = '\0';
819 }
820
// Creates the underlying pthread and starts it on ThreadEntry. Returns
// false if attribute setup or pthread_create fails. The creation mutex is
// held across pthread_create so the new thread cannot read the handle
// before it is stored (see ThreadEntry).
bool Thread::Start() {
  int result;
  pthread_attr_t attr;
  memset(&attr, 0, sizeof(attr));
  result = pthread_attr_init(&attr);
  if (result != 0) return false;
  size_t stack_size = stack_size_;
  if (stack_size == 0) {
#if V8_OS_MACOSX
    // Default on Mac OS X is 512kB -- bump up to 1MB
    stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
    // Default on AIX is 96kB -- bump up to 2MB
    stack_size = 2 * 1024 * 1024;
#endif
  }
  if (stack_size > 0) {
    result = pthread_attr_setstacksize(&attr, stack_size);
    // Comma expression: destroy the attr, then return false.
    if (result != 0) return pthread_attr_destroy(&attr), false;
  }
  {
    MutexGuard lock_guard(&data_->thread_creation_mutex_);
    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
    if (result != 0 || data_->thread_ == kNoThread) {
      return pthread_attr_destroy(&attr), false;
    }
  }
  result = pthread_attr_destroy(&attr);
  return result == 0;
}
851
Join()852 void Thread::Join() { pthread_join(data_->thread_, nullptr); }
853
// Converts a native pthread TLS key into V8's integer LocalStorageKey.
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
  // because pthread_key_t is a pointer type on Cygwin. This will probably not
  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
  return static_cast<Thread::LocalStorageKey>(ptr_key);
#else
  return static_cast<Thread::LocalStorageKey>(pthread_key);
#endif
}
866
867
// Inverse of PthreadKeyToLocalKey: recovers the native pthread TLS key.
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#if V8_OS_CYGWIN
  // Two-step cast because pthread_key_t is a pointer type on Cygwin; see
  // PthreadKeyToLocalKey.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = static_cast<intptr_t>(local_key);
  return reinterpret_cast<pthread_key_t>(ptr_key);
#else
  return static_cast<pthread_key_t>(local_key);
#endif
}
877
878
879 #ifdef V8_FAST_TLS_SUPPORTED
880
// Set (with release ordering) once InitializeTlsBaseOffset() has run.
static std::atomic<bool> tls_base_offset_initialized{false};
// Offset of the TLS slot array within the pthread structure on macOS;
// presumably read by V8's fast TLS accessors declared elsewhere.
intptr_t kMacTlsBaseOffset = 0;
883
884 // It's safe to do the initialization more that once, but it has to be
885 // done at least once.
InitializeTlsBaseOffset()886 static void InitializeTlsBaseOffset() {
887 const size_t kBufferSize = 128;
888 char buffer[kBufferSize];
889 size_t buffer_size = kBufferSize;
890 int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
891 if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
892 FATAL("V8 failed to get kernel version");
893 }
894 // The buffer now contains a string of the form XX.YY.ZZ, where
895 // XX is the major kernel version component.
896 // Make sure the buffer is 0-terminated.
897 buffer[kBufferSize - 1] = '\0';
898 char* period_pos = strchr(buffer, '.');
899 *period_pos = '\0';
900 int kernel_version_major =
901 static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
902 // The constants below are taken from pthreads.s from the XNU kernel
903 // sources archive at www.opensource.apple.com.
904 if (kernel_version_major < 11) {
905 // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
906 // same offsets.
907 #if V8_HOST_ARCH_IA32
908 kMacTlsBaseOffset = 0x48;
909 #else
910 kMacTlsBaseOffset = 0x60;
911 #endif
912 } else {
913 // 11.x.x (Lion) changed the offset.
914 kMacTlsBaseOffset = 0;
915 }
916
917 tls_base_offset_initialized.store(true, std::memory_order_release);
918 }
919
920
CheckFastTls(Thread::LocalStorageKey key)921 static void CheckFastTls(Thread::LocalStorageKey key) {
922 void* expected = reinterpret_cast<void*>(0x1234CAFE);
923 Thread::SetThreadLocal(key, expected);
924 void* actual = Thread::GetExistingThreadLocal(key);
925 if (expected != actual) {
926 FATAL("V8 failed to initialize fast TLS on current kernel");
927 }
928 Thread::SetThreadLocal(key, nullptr);
929 }
930
931 #endif // V8_FAST_TLS_SUPPORTED
932
933
// Allocates a new thread-local storage key. On platforms with fast TLS,
// lazily performs the one-time base-offset initialization and validates it.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (!tls_base_offset_initialized.load(std::memory_order_acquire)) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  // No destructor: values stored under this key are not freed automatically.
  int result = pthread_key_create(&key, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(local_key);
#endif
  return local_key;
}
953
954
DeleteThreadLocalKey(LocalStorageKey key)955 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
956 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
957 int result = pthread_key_delete(pthread_key);
958 DCHECK_EQ(0, result);
959 USE(result);
960 }
961
962
GetThreadLocal(LocalStorageKey key)963 void* Thread::GetThreadLocal(LocalStorageKey key) {
964 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
965 return pthread_getspecific(pthread_key);
966 }
967
968
SetThreadLocal(LocalStorageKey key,void * value)969 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
970 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
971 int result = pthread_setspecific(pthread_key, value);
972 DCHECK_EQ(0, result);
973 USE(result);
974 }
975
976 // pthread_getattr_np used below is non portable (hence the _np suffix). We
977 // keep this version in POSIX as most Linux-compatible derivatives will
978 // support it. MacOS and FreeBSD are different here.
979 #if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(V8_OS_DRAGONFLYBSD)
980
981 // static
// static
// Returns the highest address of the calling thread's stack (stacks grow
// down on all supported targets), or nullptr when it cannot be determined.
void* Stack::GetStackStart() {
  pthread_attr_t attr;
  int error = pthread_getattr_np(pthread_self(), &attr);
  if (!error) {
    void* base;
    size_t size;
    error = pthread_attr_getstack(&attr, &base, &size);
    CHECK(!error);
    pthread_attr_destroy(&attr);
    // |base| is the lowest stack address; the start is base + size.
    return reinterpret_cast<uint8_t*>(base) + size;
  }
  pthread_attr_destroy(&attr);

#if defined(V8_LIBC_GLIBC)
  // pthread_getattr_np can fail for the main thread. In this case
  // just like NaCl we rely on the __libc_stack_end to give us
  // the start of the stack.
  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
  return __libc_stack_end;
#endif  // defined(V8_LIBC_GLIBC)
  return nullptr;
}
1004
#endif  // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(V8_OS_DRAGONFLYBSD)
1006
1007 // static
GetCurrentStackPosition()1008 void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
1009
1010 #undef LOG_TAG
1011 #undef MAP_ANONYMOUS
1012 #undef MADV_FREE
1013
1014 } // namespace base
1015 } // namespace v8
1016