#pragma once

#include <cstdint>

// The new threadpool.

// To help future smart scheduling.
enum class TaskType {
	CPU_COMPUTE,
	IO_BLOCKING,
};

// Implement this to make something that you can run on the thread manager.
class Task {
public:
	virtual ~Task() = default;

	// The actual work. Called on a worker thread owned by the ThreadManager.
	virtual void Run() = 0;

	// Return true if this task supports best-effort cancellation via Cancel().
	// Default: not cancellable.
	virtual bool Cancellable() { return false; }

	// Best-effort cancellation hook; only meaningful when Cancellable() is true.
	// Default: no-op.
	virtual void Cancel() {}

	// Identifier matched by ThreadManager::TryCancelTask(). You must return
	// something meaningful yourself if you want cancellation; 0 means "no id".
	virtual uint64_t id() { return 0; }
};

class Waitable {
public:
	virtual ~Waitable() = default;

	// Blocks until the associated work has completed.
	virtual void Wait() = 0;

	// Convenience: waits, then destroys this object.
	// NOTE: `this` must be heap-allocated, and must not be touched afterwards.
	void WaitAndRelease() {
		Wait();
		delete this;
	}
};

struct ThreadContext;
struct GlobalThreadContext;

class ThreadManager {
public:
	ThreadManager();
	~ThreadManager();

	// The distinction here is to be able to take hyper-threading into account.
	// It gets even trickier when you think about mobile chips with BIG/LITTLE, but we'll
	// just ignore it and let the OS handle it.
	void Init(int numCores, int numLogicalCoresPerCpu);
	void EnqueueTask(Task *task, TaskType taskType);
	void EnqueueTaskOnThread(int threadNum, Task *task, TaskType taskType);
	void Teardown();

	bool IsInitialized() const;

	// Currently does nothing. It will always be best-effort - maybe it cancels,
	// maybe it doesn't. Note that the id is the id() returned by the task. You need to make that
	// something meaningful yourself.
	void TryCancelTask(uint64_t id);

	// Parallel loops (assumed compute-limited) get one thread per logical core. We have a few extra threads too
	// for I/O bound tasks, that can be run concurrently with those.
	int GetNumLooperThreads() const;

private:
	// This is always pointing to a context, initialized in the constructor.
	GlobalThreadContext *global_;

	int numThreads_ = 0;
	int numComputeThreads_ = 0;

	friend struct ThreadContext;
};

extern ThreadManager g_threadManager;