// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.
#include <memory>
#include <string>

#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are the highest priority tasks that block the
   * execution thread (e.g. major garbage collection). They must be finished
   * as soon as possible.
   */
  kUserBlocking,
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};
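
/**
 * Example (a minimal, non-normative sketch; the class name and the printed
 * message are illustrative only):
 *
 *   class PrintTask : public Task {
 *    public:
 *     explicit PrintTask(std::string message) : message_(std::move(message)) {}
 *     void Run() override { printf("%s\n", message_.c_str()); }
 *
 *    private:
 *     std::string message_;
 *   };
 */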

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
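
/**
 * Example (a non-normative sketch, assuming |platform| points to the active
 * v8::Platform; DoSomeWork() stands in for any small, resumable work item):
 *
 *   class ChunkedIdleTask : public IdleTask {
 *    public:
 *     explicit ChunkedIdleTask(Platform* platform) : platform_(platform) {}
 *     void Run(double deadline_in_seconds) override {
 *       // Process small work items until the deadline is reached.
 *       while (platform_->MonotonicallyIncreasingTime() <
 *              deadline_in_seconds) {
 *         if (!DoSomeWork()) return;  // No work left; finish early.
 *       }
 *     }
 *
 *    private:
 *     Platform* platform_;
 *   };
 */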

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destructed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this
   * TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
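
/**
 * Example (a non-normative sketch, assuming |platform| and |isolate| are the
 * active v8::Platform and v8::Isolate; MakeTask() and MakeIdleTask() stand in
 * for any factories returning std::unique_ptr<Task> / std::unique_ptr<IdleTask>):
 *
 *   std::shared_ptr<TaskRunner> runner =
 *       platform->GetForegroundTaskRunner(isolate);
 *   runner->PostTask(MakeTask());              // Runs as soon as possible.
 *   runner->PostDelayedTask(MakeTask(), 2.0);  // Runs after ~2 seconds.
 *   if (runner->IdleTasksEnabled()) {
 *     runner->PostIdleTask(MakeIdleTask());    // Runs in idle time.
 *   }
 */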

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this thread should return from the worker task on the
   * current thread ASAP. Workers should periodically invoke ShouldYield (or
   * YieldIfNeeded()) as often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   * TODO(etiennep): Make pure virtual once custom embedders implement it.
   */
  virtual bool IsJoiningThread() const { return false; }
};

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning: this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   * TODO(etiennep): Cleanup once implemented by all embedders.
   */
  virtual void CancelAndDetach() { Cancel(); }

  /**
   * Returns true if there's currently no work pending and no worker running.
   * TODO(etiennep): Deprecate IsCompleted in favor of IsActive once implemented
   * by all embedders.
   */
  virtual bool IsCompleted() = 0;
  virtual bool IsActive() { return !IsCompleted(); }

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   * TODO(etiennep): Deprecate IsRunning in favor of IsValid once implemented by
   * all embedders.
   */
  virtual bool IsRunning() = 0;
  virtual bool IsValid() { return IsRunning(); }

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   * Update this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};
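
/**
 * Example (a non-normative sketch of the typical handle lifecycle; see
 * Platform::PostJob() below for the canonical JobTask example):
 *
 *   std::unique_ptr<JobHandle> handle =
 *       platform->PostJob(TaskPriority::kUserVisible, std::move(job_task));
 *   // ... produce more work items, then let workers pick them up:
 *   handle->NotifyConcurrencyIncrease();
 *   // Either wait for all work to finish ...
 *   handle->Join();
 *   // ... or instead abandon it (workers yield, then Cancel() returns):
 *   // handle->Cancel();
 */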

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run()
   * was less than the value returned. Since GetMaxConcurrency() is a leaf
   * function, it must not call back any JobHandle methods.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;

  // TODO(1114823): Clean up once all overrides are removed.
  V8_DEPRECATED("Use the version that takes |worker_count|.")
  virtual size_t GetMaxConcurrency() const { return 0; }
};

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
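
/**
 * Example (a non-normative sketch; the class and the "count" field are
 * illustrative only):
 *
 *   class CountArg : public ConvertableToTraceFormat {
 *    public:
 *     explicit CountArg(int count) : count_(count) {}
 *     void AppendAsTraceFormat(std::string* out) const override {
 *       // Appends a valid JSON object, e.g. {"count":42}.
 *       *out += "{\"count\":" + std::to_string(count_) + "}";
 *     }
 *
 *    private:
 *     int count_;
 *   };
 */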

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros; don't call this directly.
   * The name parameter is a category group, for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls
   * are usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};
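
/**
 * Example (a non-normative sketch of observing tracing state, assuming
 * |platform| is the active v8::Platform):
 *
 *   class MyObserver : public TracingController::TraceStateObserver {
 *    public:
 *     void OnTraceEnabled() override {
 *       // Tracing started; begin emitting extra metadata.
 *     }
 *     void OnTraceDisabled() override {
 *       // Tracing stopped.
 *     }
 *   };
 *
 *   MyObserver observer;
 *   platform->GetTracingController()->AddTraceStateObserver(&observer);
 *   // ... later, before |observer| goes out of scope:
 *   platform->GetTracingController()->RemoveTraceStateObserver(&observer);
 */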

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    // TODO(hpayer): Remove this flag. Memory should never be rwx.
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the
    // destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address, returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need to support this
   * and so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
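
/**
 * Example (a non-normative sketch of the typical reserve/commit/free sequence,
 * assuming |allocator| implements PageAllocator):
 *
 *   // Reserve an inaccessible region at a randomized, page-aligned address.
 *   size_t length = 16 * allocator->AllocatePageSize();
 *   void* region = allocator->AllocatePages(
 *       allocator->GetRandomMmapAddr(), length,
 *       allocator->AllocatePageSize(), PageAllocator::kNoAccess);
 *
 *   // Commit the first page for read/write use, then release everything.
 *   allocator->SetPermissions(region, allocator->CommitPageSize(),
 *                             PageAllocator::kReadWrite);
 *   allocator->FreePages(region, length);
 */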

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the
   * number of tasks a work package should be split into. A return value of 0
   * means that there are no worker threads available. Note that a value of 0
   * won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
   * should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or ability to request lower priority work to yield when high priority
   *   work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency() const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   *
   * A sufficient PostJob() implementation that uses the default Job provided
   * in libplatform looks like:
   *  std::unique_ptr<JobHandle> PostJob(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   *  }
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;
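
  /**
   * Example (a non-normative sketch of implementing the two clocks with
   * std::chrono; a real embedder may prefer its own time source):
   *
   *   double MonotonicallyIncreasingTime() override {
   *     return std::chrono::duration<double>(
   *                std::chrono::steady_clock::now().time_since_epoch())
   *         .count();
   *   }
   *   double CurrentClockTimeMillis() override {
   *     return SystemClockTimeMillis();  // Protected helper below.
   *   }
   */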

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Disables printing of the stack trace if nullptr.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special is needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_