// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.
#include <memory>
#include <string>

#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long-running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are the highest priority tasks that block the
   * execution thread (e.g. major garbage collection). They must be finished as
   * soon as possible.
   */
  kUserBlocking,
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
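
// Example (a minimal sketch, not part of this API): an IdleTask that performs
// small chunks of work until the embedder-provided deadline is reached. The
// |platform_| pointer and DoOneChunkOfWork() helper are hypothetical.
//
//   class FlushIdleTask : public IdleTask {
//    public:
//     void Run(double deadline_in_seconds) override {
//       // |deadline_in_seconds| is expressed in the same clock as
//       // Platform::MonotonicallyIncreasingTime().
//       while (platform_->MonotonicallyIncreasingTime() < deadline_in_seconds) {
//         if (!DoOneChunkOfWork()) return;  // Nothing left to do.
//       }
//     }
//
//    private:
//     bool DoOneChunkOfWork();  // Hypothetical; does one bounded unit of work.
//     v8::Platform* platform_;  // Hypothetical; supplied by the embedder.
//   };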

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destroyed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this
   * TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
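
// Example (a sketch; MyTask and the |platform| and |isolate| pointers are
// assumed to be provided by the embedder): posting work to the foreground
// task runner, including a delayed variant.
//
//   std::shared_ptr<v8::TaskRunner> runner =
//       platform->GetForegroundTaskRunner(isolate);
//   runner->PostTask(std::make_unique<MyTask>());
//   // Runs another MyTask roughly two seconds from now, if the runner still
//   // executes tasks by then.
//   runner->PostDelayedTask(std::make_unique<MyTask>(), 2.0);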

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this worker task should return from the current thread
   * ASAP. Workers should periodically invoke ShouldYield (or YieldIfNeeded())
   * as often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   */
  virtual bool IsJoiningThread() const = 0;
};

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning: this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   */
  virtual void CancelAndDetach() = 0;

  /**
   * Returns true if there's any work pending or any worker running.
   */
  virtual bool IsActive() = 0;

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   */
  virtual bool IsValid() = 0;

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   * Updates this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run()
   * was less than the value returned. Since GetMaxConcurrency() is a leaf
   * function, it must not call back into any JobHandle methods. See the
   * sketch below for a typical implementation.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};
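
// Example (a sketch; |pending_items_| is an illustrative std::atomic<size_t>
// counter of queued work items, not part of this API): a typical
// GetMaxConcurrency() reports the outstanding work plus the workers that are
// already processing an item, so popped-but-unfinished items still count.
//
//   size_t GetMaxConcurrency(size_t worker_count) const override {
//     // |worker_count| is the number of threads currently assigned to this
//     // job; adding it keeps in-flight items from being undercounted.
//     return pending_items_.load(std::memory_order_relaxed) + worker_count;
//   }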

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
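
// Example (a sketch; the HeapStatsArg type and |bytes_| field are illustrative
// only): an argument that serializes itself as a self-contained JSON object.
//
//   class HeapStatsArg : public ConvertableToTraceFormat {
//    public:
//     void AppendAsTraceFormat(std::string* out) const override {
//       // The appended text must be valid JSON on its own.
//       *out += "{\"bytes\":" + std::to_string(bytes_) + "}";
//     }
//
//    private:
//     size_t bytes_ = 0;
//   };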

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros; don't call this directly.
   * The |name| parameter is a category group, for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls
   * are usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range. See the sketch below for
   * a typical reserve-then-commit sequence.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;
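
  // Example (a sketch of a common reserve-then-commit pattern; |allocator| is
  // an embedder-provided PageAllocator and the sizes are illustrative only):
  // reserve a larger inaccessible region up front, then commit pages on
  // demand.
  //
  //   const size_t reservation = 16 * allocator->AllocatePageSize();
  //   void* region = allocator->AllocatePages(allocator->GetRandomMmapAddr(),
  //                                           reservation,
  //                                           allocator->AllocatePageSize(),
  //                                           PageAllocator::kNoAccess);
  //   if (region != nullptr) {
  //     // Make the first committed chunk readable and writable.
  //     allocator->SetPermissions(region, allocator->CommitPageSize(),
  //                               PageAllocator::kReadWrite);
  //     // ... use the memory, then release the whole reservation ...
  //     allocator->FreePages(region, reservation);
  //   }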

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back. This should be treated
   * as a hint to the OS that the pages are no longer needed. It does not
   * guarantee that the pages will be discarded immediately or at all.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The
   * address range stays reserved and can be accessed again later by changing
   * its permissions. However, in that case the memory content is guaranteed to
   * be zero-initialized again. The memory must have been previously allocated
   * by a call to AllocatePages. Returns true on success, false otherwise.
   */
#ifdef V8_VIRTUAL_MEMORY_CAGE
  // Implementing this API is required when the virtual memory cage is enabled.
  virtual bool DecommitPages(void* address, size_t size) = 0;
#else
  // Otherwise, it is optional for now.
  virtual bool DecommitPages(void* address, size_t size) { return false; }
#endif

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need to support
   * this, so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};

/**
 * V8 Allocator used for allocating zone backings.
 */
class ZoneBackingAllocator {
 public:
  using MallocFn = void* (*)(size_t);
  using FreeFn = void (*)(void*);

  virtual MallocFn GetMallocFn() const { return ::malloc; }
  virtual FreeFn GetFreeFn() const { return ::free; }
};
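
// Example (a sketch; MyTrackedMalloc and MyTrackedFree are hypothetical
// embedder functions with malloc/free-compatible signatures): routing zone
// backings through a custom allocator, e.g. for accounting.
//
//   void* MyTrackedMalloc(size_t size);  // Hypothetical.
//   void MyTrackedFree(void* ptr);       // Hypothetical.
//
//   class TrackedZoneBackingAllocator final : public ZoneBackingAllocator {
//    public:
//     MallocFn GetMallocFn() const override { return &MyTrackedMalloc; }
//     FreeFn GetFreeFn() const override { return &MyTrackedFree; }
//   };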

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Allows the embedder to specify a custom allocator used for zones.
   */
  virtual ZoneBackingAllocator* GetZoneBackingAllocator() {
    static ZoneBackingAllocator default_allocator;
    return &default_allocator;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the
   * number of tasks a work package should be split into. A return value of 0
   * means that there are no worker threads available. Note that a value of 0
   * won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground
   * thread. The TaskRunner's NonNestableTasksEnabled() must be true. This
   * function should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with high
   * priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or the ability to request lower priority work to yield when high
   *   priority work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency(size_t worker_count) const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   *
   * A sufficient PostJob() implementation that uses the default Job provided
   * in libplatform looks like:
   *  std::unique_ptr<JobHandle> PostJob(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   *  }
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;
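
  // Example (a minimal sketch using the C++ standard library; real embedders
  // may prefer their own time source): possible implementations of the two
  // clock functions above.
  //
  //   double MonotonicallyIncreasingTime() override {
  //     // Seconds since an arbitrary fixed point (the steady_clock epoch).
  //     return std::chrono::duration<double>(
  //                std::chrono::steady_clock::now().time_since_epoch())
  //         .count();
  //   }
  //
  //   double CurrentClockTimeMillis() override {
  //     // Milliseconds since the Unix epoch; see also SystemClockTimeMillis().
  //     return std::chrono::duration<double, std::milli>(
  //                std::chrono::system_clock::now().time_since_epoch())
  //         .count();
  //   }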

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Returning nullptr disables stack trace printing.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special is needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_