// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for POSIX goes here. This is not a platform on its
// own, but contains the parts which are the same across the POSIX platforms
// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h>  // for pthread_set_name_np
#endif
#include <sched.h>  // for sched_yield
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
    defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/sysctl.h>  // NOLINT, for sysctl
#endif

#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
#include <android/log.h>  // NOLINT
#endif

#include <cmath>
#include <cstdlib>

#include "src/base/platform/platform-posix.h"

#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"

#ifdef V8_FAST_TLS_SUPPORTED
#include "src/base/atomicops.h"
#endif

#if V8_OS_MACOSX
#include <dlfcn.h>
#endif

#if V8_OS_LINUX
#include <sys/prctl.h>  // NOLINT, for prctl
#endif

#if defined(V8_OS_FUCHSIA)
#include <zircon/process.h>
#else
#include <sys/resource.h>
#endif

#if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
#include <sys/syscall.h>
#endif

#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS || \
    V8_OS_DRAGONFLYBSD
#define MAP_ANONYMOUS MAP_ANON
#endif
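
// The BSD-derived platforms above spell the anonymous-mapping flag MAP_ANON
// rather than the POSIX/Linux name MAP_ANONYMOUS, so the alias above maps it
// to the name used throughout this file.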

#if defined(V8_OS_SOLARIS)
#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
extern "C" int madvise(caddr_t, size_t, int);
#else
extern int madvise(caddr_t, size_t, int);
#endif
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

namespace v8 {
namespace base {

namespace {

// 0 is never a valid thread id.
const pthread_t kNoThread = (pthread_t) 0;

bool g_hard_abort = false;

const char* g_gc_fake_mmap = nullptr;

static LazyInstance<RandomNumberGenerator>::type
    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;

#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else   // !V8_OS_MACOSX
const int kMmapFd = -1;
#endif  // !V8_OS_MACOSX

const int kMmapFdOffset = 0;

int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
      return PROT_NONE;
    case OS::MemoryPermission::kRead:
      return PROT_READ;
    case OS::MemoryPermission::kReadWrite:
      return PROT_READ | PROT_WRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
    case OS::MemoryPermission::kReadExecute:
      return PROT_READ | PROT_EXEC;
  }
  UNREACHABLE();
}

int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (access == OS::MemoryPermission::kNoAccess) {
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX && !V8_OS_DRAGONFLYBSD
    flags |= MAP_NORESERVE;
#endif  // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX && !V8_OS_DRAGONFLYBSD
#if V8_OS_QNX
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
  return flags;
}
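
// MAP_NORESERVE asks the kernel not to reserve swap space for the (initially
// inaccessible) reservation, and QNX's MAP_LAZY defers backing-store
// allocation in a similar way, so large kNoAccess reservations stay cheap
// until pages are actually made accessible via OS::SetPermissions().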

void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
  int prot = GetProtectionFromMemoryPermission(access);
  int flags = GetFlagsForMemoryPermission(access);
  void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
  if (result == MAP_FAILED) return nullptr;
  return result;
}

int ReclaimInaccessibleMemory(void* address, size_t size) {
#if defined(V8_OS_MACOSX)
  // On macOS, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but
  // also marks the pages with the reusable bit, which allows both Activity
  // Monitor and memory-infra to correctly track the pages.
  int ret = madvise(address, size, MADV_FREE_REUSABLE);
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
#else
  int ret = madvise(address, size, MADV_FREE);
#endif
  if (ret != 0 && errno == EINVAL) {
    // MADV_FREE only works on Linux 4.5+. If the request failed, retry with
    // the older MADV_DONTNEED. Note that MADV_FREE being defined at compile
    // time doesn't imply runtime support.
#if defined(_AIX) || defined(V8_OS_SOLARIS)
    ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
    ret = madvise(address, size, MADV_DONTNEED);
#endif
  }
  return ret;
}

#endif  // !V8_OS_FUCHSIA

}  // namespace

void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
  g_hard_abort = hard_abort;
  g_gc_fake_mmap = gc_fake_mmap;
}

int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#elif V8_TARGET_ARCH_S390
  return 8;
#else
  // Otherwise we just assume 16 byte alignment, i.e.:
  // - With gcc 4.4 the tree vectorization optimizer can generate code
  //   that requires 16 byte alignment such as movdqa on x86.
  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
  //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
  return 16;
#endif
}

// static
size_t OS::AllocatePageSize() {
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}

// static
size_t OS::CommitPageSize() {
  static size_t page_size = getpagesize();
  return page_size;
}
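
// On POSIX both page-size queries above resolve to the system page size
// (sysconf(_SC_PAGESIZE) and getpagesize() report the same value), so
// allocation granularity and commit granularity coincide on these platforms.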

// static
void OS::SetRandomMmapSeed(int64_t seed) {
  if (seed) {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->SetSeed(seed);
  }
}

// static
void* OS::GetRandomMmapAddr() {
  uintptr_t raw_addr;
  {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
                                                          sizeof(raw_addr));
  }
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
#if V8_TARGET_ARCH_X64
  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
  raw_addr &= uint64_t{0x3FFFF000};
  // Use extra address space to isolate the mmap regions.
  raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
  // Big-endian Linux: 42 bits of virtual addressing.
  raw_addr &= uint64_t{0x03FFFFFFF000};
#else
  // Little-endian Linux: 46 bits of virtual addressing.
  raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_MIPS64
  // We allocate code in 256 MB aligned segments because of optimizations using
  // the J instruction, which requires that all code is within a single 256 MB
  // segment.
  raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42
  // bits of virtual addressing.  Truncate to 40 bits to give the kernel a
  // chance to fulfill the request.
  raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing.  Truncate to 29 bits to give the kernel a
  // chance to fulfill the request.
  raw_addr &= 0x1FFFF000;
#else
  raw_addr &= 0x3FFFF000;

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  raw_addr += 0x80000000;
#elif V8_OS_AIX
  // The range 0x30000000 - 0xD0000000 is available on AIX;
  // choose the upper range.
  raw_addr += 0x90000000;
#else
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
#endif
#endif
  return reinterpret_cast<void*>(raw_addr);
}
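
// The address computed above is only a hint: Allocate() passes it to mmap()
// without MAP_FIXED, so if the randomly chosen range is already occupied the
// kernel simply picks a different one.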

// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  address = AlignedAddress(address, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  size_t request_size = size + (alignment - page_size);
  request_size = RoundUp(request_size, OS::AllocatePageSize());
  void* result = base::Allocate(address, request_size, access);
  if (result == nullptr) return nullptr;

  // Unmap memory allocated before the aligned base address.
  uint8_t* base = static_cast<uint8_t*>(result);
  uint8_t* aligned_base = RoundUp(base, alignment);
  if (aligned_base != base) {
    DCHECK_LT(base, aligned_base);
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    CHECK(Free(base, prefix_size));
    request_size -= prefix_size;
  }
  // Unmap memory allocated after the potentially unaligned end.
  if (size != request_size) {
    DCHECK_LT(size, request_size);
    size_t suffix_size = request_size - size;
    CHECK(Free(aligned_base + size, suffix_size));
    request_size -= suffix_size;
  }

  DCHECK_EQ(size, request_size);
  return static_cast<void*>(aligned_base);
}
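
// Illustrative example (assuming a 4 KiB page size and a 64 KiB alignment
// request): for size = 128 KiB the function maps request_size =
// 128 KiB + (64 KiB - 4 KiB) = 188 KiB, rounds the returned base up to the
// next 64 KiB boundary, and then unmaps both the prefix below that boundary
// and whatever remains past aligned_base + 128 KiB, leaving exactly 128 KiB
// at a 64 KiB-aligned address.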

// static
bool OS::Free(void* address, const size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
  DCHECK_EQ(0, size % AllocatePageSize());
  return munmap(address, size) == 0;
}

// static
bool OS::Release(void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
  return munmap(address, size) == 0;
}

// static
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());

  int prot = GetProtectionFromMemoryPermission(access);
  int ret = mprotect(address, size, prot);
  if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
    ret = ReclaimInaccessibleMemory(address, size);
  }

// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
// changing permissions away from OS::MemoryPermission::kNoAccess. Since this
// state is not kept at this layer, we always call this if access != kNoAccess.
// The cost is a syscall that effectively no-ops.
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(V8_OS_MACOSX)
  if (access != OS::MemoryPermission::kNoAccess)
    madvise(address, size, MADV_FREE_REUSE);
#endif

  return ret == 0;
}

// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
  return true;
#else
  // TODO(bbudge) Return true for all POSIX platforms.
  return false;
#endif
}
#endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA

const char* OS::GetGCFakeMMapFile() {
  return g_gc_fake_mmap;
}


void OS::Sleep(TimeDelta interval) {
  usleep(static_cast<useconds_t>(interval.InMicroseconds()));
}


void OS::Abort() {
  if (g_hard_abort) {
    V8_IMMEDIATE_CRASH();
  }
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
  asm("bkpt 0");
#elif V8_HOST_ARCH_ARM64
  asm("brk 0");
#elif V8_HOST_ARCH_MIPS
  asm("break");
#elif V8_HOST_ARCH_MIPS64
  asm("break");
#elif V8_HOST_ARCH_PPC
  asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
  asm("int $3");
#elif V8_HOST_ARCH_X64
  asm("int $3");
#elif V8_HOST_ARCH_S390
  // Software breakpoint instruction is 0x0001
  asm volatile(".word 0x0001");
#else
#error Unsupported host architecture.
#endif
}


class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
      : file_(file), memory_(memory), size_(size) {}
  ~PosixMemoryMappedFile() final;
  void* memory() const final { return memory_; }
  size_t size() const final { return size_; }

 private:
  FILE* const file_;
  void* const memory_;
  size_t const size_;
};


// static
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  if (FILE* file = fopen(name, "r+")) {
    if (fseek(file, 0, SEEK_END) == 0) {
      long size = ftell(file);  // NOLINT(runtime/int)
      if (size >= 0) {
        void* const memory =
            mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
                 MAP_SHARED, fileno(file), 0);
        if (memory != MAP_FAILED) {
          return new PosixMemoryMappedFile(file, memory, size);
        }
      }
    }
    fclose(file);
  }
  return nullptr;
}


// static
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
                                                   size_t size, void* initial) {
  if (FILE* file = fopen(name, "w+")) {
    size_t result = fwrite(initial, 1, size, file);
    if (result == size && !ferror(file)) {
      void* memory = mmap(OS::GetRandomMmapAddr(), result,
                          PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
      if (memory != MAP_FAILED) {
        return new PosixMemoryMappedFile(file, memory, result);
      }
    }
    fclose(file);
  }
  return nullptr;
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) CHECK(OS::Free(memory_, size_));
  fclose(file_);
}


int OS::GetCurrentProcessId() {
  return static_cast<int>(getpid());
}


int OS::GetCurrentThreadId() {
#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
  return static_cast<int>(syscall(__NR_gettid));
#elif V8_OS_ANDROID
  return static_cast<int>(gettid());
#elif V8_OS_AIX
  return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
  return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
  return static_cast<int>(pthread_self());
#else
  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
#endif
}

void OS::ExitProcess(int exit_code) {
  // Use _exit instead of exit to avoid races between isolate
  // threads and static destructors.
  fflush(stdout);
  fflush(stderr);
  _exit(exit_code);
}

// ----------------------------------------------------------------------------
// POSIX date/time support.
//

#if !defined(V8_OS_FUCHSIA)
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
  *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
  *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
  return 0;
}
#endif

double OS::TimeCurrentMillis() {
  return Time::Now().ToJsTime();
}

double PosixTimezoneCache::DaylightSavingsOffset(double time) {
  if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
  struct tm tm;
  struct tm* t = localtime_r(&tv, &tm);
  if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}


int OS::GetLastError() {
  return errno;
}


// ----------------------------------------------------------------------------
// POSIX stdio support.
//

FILE* OS::FOpen(const char* path, const char* mode) {
  FILE* file = fopen(path, mode);
  if (file == nullptr) return nullptr;
  struct stat file_stat;
  if (fstat(fileno(file), &file_stat) != 0) {
    fclose(file);
    return nullptr;
  }
  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
  if (is_regular_file) return file;
  fclose(file);
  return nullptr;
}
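
// Only regular files are handed back to the caller; directories, devices and
// FIFOs are rejected above, presumably because callers expect ordinary
// seekable files and would otherwise block or misbehave on such handles.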


bool OS::Remove(const char* path) {
  return (remove(path) == 0);
}

char OS::DirectorySeparator() { return '/'; }

bool OS::isDirectorySeparator(const char ch) {
  return ch == DirectorySeparator();
}


FILE* OS::OpenTemporaryFile() {
  return tmpfile();
}


const char* const OS::LogFileOpenMode = "w";


void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vprintf(format, args);
#endif
}


void OS::FPrint(FILE* out, const char* format, ...) {
  va_list args;
  va_start(args, format);
  VFPrint(out, format, args);
  va_end(args);
}


void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vfprintf(out, format, args);
#endif
}


void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
  vfprintf(stderr, format, args);
#endif
}


int OS::SNPrintF(char* str, int length, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int result = VSNPrintF(str, length, format, args);
  va_end(args);
  return result;
}


int OS::VSNPrintF(char* str,
                  int length,
                  const char* format,
                  va_list args) {
  int n = vsnprintf(str, length, format, args);
  if (n < 0 || n >= length) {
    // If the length is zero, the assignment fails.
    if (length > 0)
      str[length - 1] = '\0';
    return -1;
  } else {
    return n;
  }
}
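
// Behavioral note: unlike raw vsnprintf(), these wrappers return -1 whenever
// the output was truncated or an encoding error occurred, and they always
// NUL-terminate a non-empty buffer. For example, OS::SNPrintF(buf, 4, "%d",
// 12345) writes "123" plus the terminator and returns -1.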


// ----------------------------------------------------------------------------
// POSIX string support.
//

char* OS::StrChr(char* str, int c) {
  return strchr(str, c);
}


void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
  strncpy(dest, src, n);
}


// ----------------------------------------------------------------------------
// POSIX thread support.
//

class Thread::PlatformData {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
  // Synchronizes thread creation
  Mutex thread_creation_mutex_;
};

Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()),
      start_semaphore_(nullptr) {
  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
    stack_size_ = PTHREAD_STACK_MIN;
  }
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void SetThreadName(const char* name) {
#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
  pthread_set_name_np(pthread_self(), name);
#elif V8_OS_NETBSD
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
  pthread_setname_np(pthread_self(), "%s", name);
#elif V8_OS_MACOSX
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (dynamic_pthread_setname_np == nullptr) return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
#elif defined(PR_SET_NAME)
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(name),  // NOLINT
        0, 0, 0);
#endif
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // We take the lock here to make sure that pthread_create finished first since
  // we don't know which thread will run first (the original thread or the new
  // one).
  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
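  // By the time the scoped lock above has been acquired and released,
  // Thread::Start() is guaranteed to have finished writing data_->thread_
  // (it holds thread_creation_mutex_ across pthread_create), so the handle
  // checked below is valid.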
  SetThreadName(thread->name());
  DCHECK_NE(thread->data()->thread_, kNoThread);
  thread->NotifyStartedAndRun();
  return nullptr;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  int result;
  pthread_attr_t attr;
  memset(&attr, 0, sizeof(attr));
  result = pthread_attr_init(&attr);
  DCHECK_EQ(0, result);
  size_t stack_size = stack_size_;
  if (stack_size == 0) {
#if V8_OS_MACOSX
    // Default on Mac OS X is 512kB -- bump up to 1MB
    stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
    // Default on AIX is 96kB -- bump up to 2MB
    stack_size = 2 * 1024 * 1024;
#endif
  }
  if (stack_size > 0) {
    result = pthread_attr_setstacksize(&attr, stack_size);
    DCHECK_EQ(0, result);
  }
  {
    LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
  }
  DCHECK_EQ(0, result);
  result = pthread_attr_destroy(&attr);
  DCHECK_EQ(0, result);
  DCHECK_NE(data_->thread_, kNoThread);
  USE(result);
}

void Thread::Join() { pthread_join(data_->thread_, nullptr); }

static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
  // because pthread_key_t is a pointer type on Cygwin. This will probably not
  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
  return static_cast<Thread::LocalStorageKey>(ptr_key);
#else
  return static_cast<Thread::LocalStorageKey>(pthread_key);
#endif
}


static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#if V8_OS_CYGWIN
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = static_cast<intptr_t>(local_key);
  return reinterpret_cast<pthread_key_t>(ptr_key);
#else
  return static_cast<pthread_key_t>(local_key);
#endif
}


#ifdef V8_FAST_TLS_SUPPORTED

static Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;

// It's safe to do the initialization more than once, but it has to be
// done at least once.
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
    FATAL("V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, nullptr, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if V8_HOST_ARCH_IA32
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  Release_Store(&tls_base_offset_initialized, 1);
}


static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    FATAL("V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, nullptr);
}

#endif  // V8_FAST_TLS_SUPPORTED


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(local_key);
#endif
  return local_key;
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
  int result = pthread_key_delete(pthread_key);
  DCHECK_EQ(0, result);
  USE(result);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
  int result = pthread_setspecific(pthread_key, value);
  DCHECK_EQ(0, result);
  USE(result);
}

#undef LOG_TAG
#undef MAP_ANONYMOUS
#undef MADV_FREE

}  // namespace base
}  // namespace v8