// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/libsampler/sampler.h"

#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA

#define USE_SIGNALS

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>

#if !V8_OS_QNX && !V8_OS_AIX
#include <sys/syscall.h>  // NOLINT
#endif

#if V8_OS_MACOSX
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
#elif (!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && !V8_OS_OPENBSD
#include <ucontext.h>
#endif

#include <unistd.h>

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
    (defined(__arm__) || defined(__aarch64__)) && \
    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>  // NOLINT
#endif

#elif V8_OS_WIN || V8_OS_CYGWIN

#include "src/base/win32-headers.h"

#elif V8_OS_FUCHSIA

#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/debug.h>
#include <zircon/types.h>

// TODO(wez): Remove this once the Fuchsia SDK has rolled.
#if defined(ZX_THREAD_STATE_REGSET0)
#define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
  uint32_t dummy_out_len = 0;
  return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
                              &dummy_out_len);
}
#if defined(__x86_64__)
typedef zx_x86_64_general_regs_t zx_thread_state_general_regs_t;
#else
typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
#endif
#endif  // defined(ZX_THREAD_STATE_REGSET0)

#endif

#include <algorithm>
#include <map>
#include <vector>

#include "src/base/atomic-utils.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"

#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)

// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784

#if defined(__arm__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__aarch64__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
} mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__i386__)
// x86 version for Android.
typedef struct {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };

#elif defined(__x86_64__)
// x64 version for Android.
typedef struct {
  uint64_t gregs[23];
  void* fpregs;
  uint64_t __reserved1[8];
} mcontext_t;

typedef struct ucontext {
  uint64_t uc_flags;
  struct ucontext *uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif

#endif  // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)


namespace v8 {
namespace sampler {

namespace {

#if defined(USE_SIGNALS)
typedef std::vector<Sampler*> SamplerList;
typedef SamplerList::iterator SamplerListIterator;
typedef base::AtomicValue<bool> AtomicMutex;

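// Scoped guard providing mutual exclusion on top of an AtomicMutex. The
// blocking form spins until the flag is acquired; the non-blocking form makes
// a single attempt and is_success() reports whether it took the lock. The
// flag is released in the destructor only if it was acquired.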
class AtomicGuard {
 public:
  explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
      : atomic_(atomic), is_success_(false) {
    do {
      // Use Acquire_Load to gain mutual exclusion.
      USE(atomic_->Value());
      is_success_ = atomic_->TrySetValue(false, true);
    } while (is_blocking && !is_success_);
  }

  bool is_success() const { return is_success_; }

  ~AtomicGuard() {
    if (!is_success_) return;
    atomic_->SetValue(false);
  }

 private:
  AtomicMutex* const atomic_;
  bool is_success_;
};

// Returns key for hash map.
void* ThreadKey(pthread_t thread_id) {
  return reinterpret_cast<void*>(thread_id);
}

// Returns hash value for hash map.
uint32_t ThreadHash(pthread_t thread_id) {
#if V8_OS_BSD
  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
#else
  return static_cast<uint32_t>(thread_id);
#endif
}

#endif  // USE_SIGNALS

}  // namespace

#if defined(USE_SIGNALS)

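// Per-sampler platform data: remembers the pthread id of the thread the
// sampler was created on, i.e. the thread whose stack will be sampled.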
class Sampler::PlatformData {
 public:
  PlatformData() : vm_tid_(pthread_self()) {}
  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};

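// Keeps a process-wide map from thread id to the list of samplers registered
// for that thread. The signal handler looks up the current thread here to
// dispatch a tick to every sampler attached to it.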
class SamplerManager {
 public:
  SamplerManager() : sampler_map_() {}

  void AddSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || !sampler->IsRegistered());
    // Add sampler into map if needed.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    base::HashMap::Entry* entry =
            sampler_map_.LookupOrInsert(ThreadKey(thread_id),
                                        ThreadHash(thread_id));
    DCHECK_NOT_NULL(entry);
    if (entry->value == nullptr) {
      SamplerList* samplers = new SamplerList();
      samplers->push_back(sampler);
      entry->value = samplers;
    } else {
      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
      bool exists = false;
      for (SamplerListIterator iter = samplers->begin();
           iter != samplers->end(); ++iter) {
        if (*iter == sampler) {
          exists = true;
          break;
        }
      }
      if (!exists) {
        samplers->push_back(sampler);
      }
    }
  }

  void RemoveSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || sampler->IsRegistered());
    // Remove sampler from map.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    void* thread_key = ThreadKey(thread_id);
    uint32_t thread_hash = ThreadHash(thread_id);
    base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
    DCHECK_NOT_NULL(entry);
    SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
    for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
         ++iter) {
      if (*iter == sampler) {
        samplers->erase(iter);
        break;
      }
    }
    if (samplers->empty()) {
      sampler_map_.Remove(thread_key, thread_hash);
      delete samplers;
    }
  }

#if defined(USE_SIGNALS)
  void DoSample(const v8::RegisterState& state) {
    AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
    if (!atomic_guard.is_success()) return;
    pthread_t thread_id = pthread_self();
    base::HashMap::Entry* entry =
        sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id));
    if (!entry) return;
    SamplerList& samplers = *static_cast<SamplerList*>(entry->value);

    for (size_t i = 0; i < samplers.size(); ++i) {
      Sampler* sampler = samplers[i];
      Isolate* isolate = sampler->isolate();
      // We require a fully initialized and entered isolate.
      if (isolate == nullptr || !isolate->IsInUse()) continue;
      if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
      sampler->SampleStack(state);
    }
  }
#endif

  static SamplerManager* instance() { return instance_.Pointer(); }

 private:
  base::HashMap sampler_map_;
  static AtomicMutex samplers_access_counter_;
  static base::LazyInstance<SamplerManager>::type instance_;
};

AtomicMutex SamplerManager::samplers_access_counter_;
base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
    LAZY_INSTANCE_INITIALIZER;

#elif V8_OS_WIN || V8_OS_CYGWIN

// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
// on Win32.

class Sampler::PlatformData {
 public:
  // Get a handle to the calling thread. This is the thread that we are
  // going to profile. We need to make a copy of the handle because we are
  // going to use it in the sampler thread. Using GetThreadHandle() will
  // not work in this case. We're using OpenThread because DuplicateHandle
  // for some reason doesn't work in Chrome's sandbox.
  PlatformData()
      : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
                                    THREAD_SUSPEND_RESUME |
                                    THREAD_QUERY_INFORMATION,
                                    false,
                                    GetCurrentThreadId())) {}

  ~PlatformData() {
    if (profiled_thread_ != nullptr) {
      CloseHandle(profiled_thread_);
      profiled_thread_ = nullptr;
    }
  }

  HANDLE profiled_thread() { return profiled_thread_; }

 private:
  HANDLE profiled_thread_;
};

#elif V8_OS_FUCHSIA

class Sampler::PlatformData {
 public:
  PlatformData() {
    zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
                        &profiled_thread_);
  }
  ~PlatformData() {
    if (profiled_thread_ != ZX_HANDLE_INVALID) {
      zx_handle_close(profiled_thread_);
      profiled_thread_ = ZX_HANDLE_INVALID;
    }
  }

  zx_handle_t profiled_thread() { return profiled_thread_; }

 private:
  zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
};

#endif  // USE_SIGNALS


#if defined(USE_SIGNALS)
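// Installs and removes the process-wide SIGPROF handler. Installation is
// reference counted: the handler is installed when the first client calls
// IncreaseSamplerCount() and the previous handler is restored when the last
// client calls DecreaseSamplerCount().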
class SignalHandler {
 public:
  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
  static void TearDown() {
    delete mutex_;
    mutex_ = nullptr;
  }

  static void IncreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (++client_count_ == 1) Install();
  }

  static void DecreaseSamplerCount() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    if (--client_count_ == 0) Restore();
  }

  static bool Installed() {
    base::LockGuard<base::Mutex> lock_guard(mutex_);
    return signal_handler_installed_;
  }

 private:
  static void Install() {
    struct sigaction sa;
    sa.sa_sigaction = &HandleProfilerSignal;
    sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
    sa.sa_flags = SA_SIGINFO;
#else
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
#endif
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void Restore() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void FillRegisterState(void* context, RegisterState* regs);
  static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);

  // Protects the process wide state below.
  static base::Mutex* mutex_;
  static int client_count_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;
};

base::Mutex* SignalHandler::mutex_ = nullptr;
int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;


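// SIGPROF handler. It runs on the interrupted (sampled) thread, extracts the
// register state from the signal context, and forwards it to the
// SamplerManager, which dispatches the tick to the samplers registered for
// this thread.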
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  v8::RegisterState state;
  FillRegisterState(context, &state);
  SamplerManager::instance()->DoSample(state);
}

void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
  mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
#else
  state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
  state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
  state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
#endif  // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.sp);
  // FP is an alias for x29.
  state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
  state->pc = reinterpret_cast<void*>(mcontext.pc);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_PPC
#if V8_LIBC_GLIBC
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
  state->sp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
  state->fp =
      reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#else
  // Some C libraries, notably Musl, define the regs member as a void pointer
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[32]);
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[1]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[31]);
#endif
#elif V8_HOST_ARCH_S390
#if V8_TARGET_ARCH_32_BIT
  // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
  // mode.  This bit needs to be masked out to resolve actual address.
  state->pc =
      reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
#else
  state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
#endif  // V8_TARGET_ARCH_32_BIT
  state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
  state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
#if __DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#else  // !__DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
  state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
  state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
  state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
  state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#else  // !__DARWIN_UNIX03
  state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
  state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
  state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
#endif  // __DARWIN_UNIX03
#endif  // V8_HOST_ARCH_IA32
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
  state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
  state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_FP]);
#elif V8_TARGET_ARCH_PPC64
  state->pc = reinterpret_cast<void*>(mcontext.mc_srr0);
  state->sp = reinterpret_cast<void*>(mcontext.mc_frame[1]);
  state->fp = reinterpret_cast<void*>(mcontext.mc_frame[31]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
  state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
  state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
  state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
  state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
  state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
  state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
  state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
  state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
  state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif  // V8_HOST_ARCH_*
#elif V8_OS_AIX
  state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
  state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
  state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
#endif  // V8_OS_AIX
}

#endif  // USE_SIGNALS


void Sampler::SetUp() {
#if defined(USE_SIGNALS)
  SignalHandler::SetUp();
#endif
}


void Sampler::TearDown() {
#if defined(USE_SIGNALS)
  SignalHandler::TearDown();
#endif
}

Sampler::Sampler(Isolate* isolate)
    : is_counting_samples_(false),
      js_sample_count_(0),
      external_sample_count_(0),
      isolate_(isolate),
      profiling_(false),
      has_processing_thread_(false),
      active_(false),
      registered_(false) {
  data_ = new PlatformData;
}

Sampler::~Sampler() {
  DCHECK(!IsActive());
#if defined(USE_SIGNALS)
  if (IsRegistered()) {
    SamplerManager::instance()->RemoveSampler(this);
  }
#endif
  delete data_;
}

void Sampler::Start() {
  DCHECK(!IsActive());
  SetActive(true);
#if defined(USE_SIGNALS)
  SamplerManager::instance()->AddSampler(this);
#endif
}


void Sampler::Stop() {
#if defined(USE_SIGNALS)
  SamplerManager::instance()->RemoveSampler(this);
#endif
  DCHECK(IsActive());
  SetActive(false);
  SetRegistered(false);
}


void Sampler::IncreaseProfilingDepth() {
  base::Relaxed_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
  SignalHandler::IncreaseSamplerCount();
#endif
}


void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
  SignalHandler::DecreaseSamplerCount();
#endif
  base::Relaxed_AtomicIncrement(&profiling_, -1);
}


#if defined(USE_SIGNALS)

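// With signal-based sampling the sampler thread just raises SIGPROF on the
// sampled thread; the register state is captured in the signal handler on
// that thread.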
void Sampler::DoSample() {
  if (!SignalHandler::Installed()) return;
  if (!IsActive() && !IsRegistered()) {
    SamplerManager::instance()->AddSampler(this);
    SetRegistered(true);
  }
  pthread_kill(platform_data()->vm_tid(), SIGPROF);
}

#elif V8_OS_WIN || V8_OS_CYGWIN

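// On Windows (and Cygwin) the sampler thread suspends the profiled thread,
// reads its register state with GetThreadContext, samples the stack directly,
// and then resumes the thread.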
void Sampler::DoSample() {
  HANDLE profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == nullptr) return;

  const DWORD kSuspendFailed = static_cast<DWORD>(-1);
  if (SuspendThread(profiled_thread) == kSuspendFailed) return;

  // Context used for sampling the register state of the profiled thread.
  CONTEXT context;
  memset(&context, 0, sizeof(context));
  context.ContextFlags = CONTEXT_FULL;
  if (GetThreadContext(profiled_thread, &context) != 0) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(context.Rip);
    state.sp = reinterpret_cast<void*>(context.Rsp);
    state.fp = reinterpret_cast<void*>(context.Rbp);
#else
    state.pc = reinterpret_cast<void*>(context.Eip);
    state.sp = reinterpret_cast<void*>(context.Esp);
    state.fp = reinterpret_cast<void*>(context.Ebp);
#endif
    SampleStack(state);
  }
  ResumeThread(profiled_thread);
}

#elif V8_OS_FUCHSIA

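// On Fuchsia the sampler thread suspends the profiled thread with
// zx_task_suspend, waits for it to actually reach the suspended state, reads
// its general registers via zx_thread_read_state, samples the stack, and then
// resumes the thread.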
void Sampler::DoSample() {
  zx_handle_t profiled_thread = platform_data()->profiled_thread();
  if (profiled_thread == ZX_HANDLE_INVALID) return;

  if (zx_task_suspend(profiled_thread) != ZX_OK) return;

  // Wait for the target thread to become suspended, or to exit.
  // TODO(wez): There is currently no suspension count for threads, so there
  // is a risk that some other caller resumes the thread in-between our suspend
  // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
  // deadline to protect against hanging the sampler thread in this case.
  zx_signals_t signals = 0;
  zx_status_t suspended = zx_object_wait_one(
      profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
      zx_deadline_after(ZX_MSEC(100)), &signals);
  if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
    zx_task_resume(profiled_thread, 0);
    return;
  }

  // Fetch a copy of its "general register" states.
  zx_thread_state_general_regs_t thread_state = {};
  if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
                           &thread_state, sizeof(thread_state)) == ZX_OK) {
    v8::RegisterState state;
#if V8_HOST_ARCH_X64
    state.pc = reinterpret_cast<void*>(thread_state.rip);
    state.sp = reinterpret_cast<void*>(thread_state.rsp);
    state.fp = reinterpret_cast<void*>(thread_state.rbp);
#elif V8_HOST_ARCH_ARM64
    state.pc = reinterpret_cast<void*>(thread_state.pc);
    state.sp = reinterpret_cast<void*>(thread_state.sp);
    state.fp = reinterpret_cast<void*>(thread_state.r[29]);
#endif
    SampleStack(state);
  }

  zx_task_resume(profiled_thread, 0);
}

// TODO(wez): Remove this once the Fuchsia SDK has rolled.
#if defined(ZX_THREAD_STATE_REGSET0)
#undef ZX_THREAD_STATE_GENERAL_REGS
#endif

#endif  // USE_SIGNALS

}  // namespace sampler
}  // namespace v8