/* vim:set ts=2 sw=2 sts=2 et: */
/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

#include "gtest/gtest.h"
#include "gmock/gmock.h"

#include "mozilla/gfx/JobScheduler.h"

#ifndef WIN32
#include <pthread.h>
#include <sched.h>
#endif

#include <stdlib.h>
#include <time.h>

namespace test_scheduler {

using namespace mozilla::gfx;
using namespace mozilla;

// Artificially cause threads to yield randomly in an attempt to make racy
// things more apparent (if any).
void MaybeYieldThread()
{
#ifndef WIN32
  if (rand() % 5 == 0) {
    sched_yield();
  }
#endif
}

/// Used by TestJob to check that tasks are processed in the right order.
struct SanityChecker {
  std::vector<uint64_t> mAdvancements;
  mozilla::gfx::CriticalSection mSection;

  explicit SanityChecker(uint64_t aNumCmdBuffers)
  {
    for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
      mAdvancements.push_back(0);
    }
  }

  virtual void Check(uint64_t aJobId, uint64_t aCmdId)
  {
    MaybeYieldThread();
    CriticalSectionAutoEnter lock(&mSection);
    MOZ_RELEASE_ASSERT(mAdvancements[aJobId] == aCmdId - 1);
    mAdvancements[aJobId] = aCmdId;
  }
};

/// Run checks that are specific to TestSchedulerJoin.
struct JoinTestSanityCheck : public SanityChecker {
  bool mSpecialJobHasRun;

  explicit JoinTestSanityCheck(uint64_t aNumCmdBuffers)
    : SanityChecker(aNumCmdBuffers)
    , mSpecialJobHasRun(false)
  {}

  virtual void Check(uint64_t aJobId, uint64_t aCmdId) override
  {
    // Task 0 is the special task, executed once when everything has joined after task 1.
    if (aCmdId == 0) {
      MOZ_RELEASE_ASSERT(!mSpecialJobHasRun, "GFX: A special task has been executed.");
      mSpecialJobHasRun = true;
      for (auto advancement : mAdvancements) {
        // Because of the synchronization point (beforeFilter), all
        // task buffers should have run task 1 when task 0 is run.
        MOZ_RELEASE_ASSERT(advancement == 1, "GFX: task buffer has not run task 1.");
      }
    } else {
      // This check does not apply to task 0.
      SanityChecker::Check(aJobId, aCmdId);
    }

    if (aCmdId == 2) {
      MOZ_RELEASE_ASSERT(mSpecialJobHasRun, "GFX: Special job has not run.");
    }
  }
};

class TestJob : public Job
{
public:
  TestJob(uint64_t aCmdId, uint64_t aJobId, SanityChecker* aChecker,
          SyncObject* aStart, SyncObject* aCompletion)
    : Job(aStart, aCompletion, nullptr)
    , mCmdId(aCmdId)
    , mCmdBufferId(aJobId)
    , mSanityChecker(aChecker)
  {}

  JobStatus Run()
  {
    MaybeYieldThread();
    mSanityChecker->Check(mCmdBufferId, mCmdId);
    MaybeYieldThread();
    return JobStatus::Complete;
  }

  uint64_t mCmdId;
  uint64_t mCmdBufferId;
  SanityChecker* mSanityChecker;
};

/// This test creates aNumCmdBuffers task buffers with sync objects set up
/// so that all task buffers join after task 1, then a single special task
/// (task 0) runs, after which all task buffers fork again.
/// This simulates the kind of scenario where all tiles must join at
/// a certain point to execute, say, a filter, and fork again after the filter
/// has been processed.
/// The main thread is only blocked when waiting for the completion of the entire
/// task stream (it doesn't have to wait at the filter's sync points to orchestrate it).
void TestSchedulerJoin(uint32_t aNumThreads, uint32_t aNumCmdBuffers)
{
  JoinTestSanityCheck check(aNumCmdBuffers);

  RefPtr<SyncObject> beforeFilter = new SyncObject(aNumCmdBuffers);
  RefPtr<SyncObject> afterFilter = new SyncObject();
  RefPtr<SyncObject> completion = new SyncObject(aNumCmdBuffers);
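  // How the sync objects are wired up below: every command buffer's task 1
  // signals beforeFilter, the single filter task (task 0) signals afterFilter,
  // and every command buffer's task 2 signals completion.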

  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
    Job* t1 = new TestJob(1, i, &check, nullptr, beforeFilter);
    JobScheduler::SubmitJob(t1);
    MaybeYieldThread();
  }
  beforeFilter->FreezePrerequisites();

  // This special task is executed once all command buffers have joined after task 1.
  JobScheduler::SubmitJob(
    new TestJob(0, 0, &check, beforeFilter, afterFilter)
  );
  afterFilter->FreezePrerequisites();

  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
    Job* t2 = new TestJob(2, i, &check, afterFilter, completion);
    JobScheduler::SubmitJob(t2);
    MaybeYieldThread();
  }
  completion->FreezePrerequisites();

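  // Block the main thread only until the whole task stream has completed; the
  // intermediate sync points are handled entirely by the worker threads.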
  JobScheduler::Join(completion);

  MaybeYieldThread();

  for (auto advancement : check.mAdvancements) {
    EXPECT_TRUE(advancement == 2);
  }
}

/// This test creates several chains of 10 tasks. The tasks of a given chain are
/// executed sequentially, and the chains are executed in parallel.
/// This simulates the typical scenario where we want to process sequences of drawing
/// commands for several tiles in parallel.
void TestSchedulerChain(uint32_t aNumThreads, uint32_t aNumCmdBuffers)
{
  SanityChecker check(aNumCmdBuffers);

  RefPtr<SyncObject> completion = new SyncObject(aNumCmdBuffers);

  uint32_t numJobs = 10;

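  // Build one chain per command buffer: task t waits on the sync object
  // signaled by task t-1, so the tasks of a chain can only run sequentially.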
  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {

    std::vector<RefPtr<SyncObject>> syncs;
    std::vector<Job*> tasks;
    syncs.reserve(numJobs);
    tasks.reserve(numJobs);

    for (uint32_t t = 0; t < numJobs - 1; ++t) {
      syncs.push_back(new SyncObject());
      tasks.push_back(new TestJob(t + 1, i, &check,
                                  t == 0 ? nullptr : syncs[t - 1].get(),
                                  syncs[t]));
      syncs.back()->FreezePrerequisites();
    }

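    // The final task of the chain signals the shared completion sync object.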
    tasks.push_back(new TestJob(numJobs, i, &check, syncs.back(), completion));

    if (i % 2 == 0) {
      // Submit the tasks of half of the chains in order...
      for (Job* task : tasks) {
        JobScheduler::SubmitJob(task);
        MaybeYieldThread();
      }
    } else {
      // ...and the tasks of the other half in reverse order.
      for (int32_t reverse = numJobs - 1; reverse >= 0; --reverse) {
        JobScheduler::SubmitJob(tasks[reverse]);
        MaybeYieldThread();
      }
    }
  }
  completion->FreezePrerequisites();

  JobScheduler::Join(completion);

  for (auto advancement : check.mAdvancements) {
    EXPECT_TRUE(advancement == numJobs);
  }
}

} // namespace test_scheduler

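// Stress scheduler startup and teardown: repeatedly Init and ShutDown with
// various worker thread counts, without submitting any jobs.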
TEST(Moz2D, JobScheduler_Shutdown) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 16; ++threads) {
    for (uint32_t i = 1; i < 1000; ++i) {
      mozilla::gfx::JobScheduler::Init(threads, threads);
      mozilla::gfx::JobScheduler::ShutDown();
    }
  }
}

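// Run the fork-join scenario over a range of thread, queue, and command
// buffer counts.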
TEST(Moz2D, JobScheduler_Join) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 8; ++threads) {
    for (uint32_t queues = 1; queues < threads; ++queues) {
      for (uint32_t buffers = 1; buffers < 100; buffers += 3) {
        mozilla::gfx::JobScheduler::Init(threads, queues);
        test_scheduler::TestSchedulerJoin(threads, buffers);
        mozilla::gfx::JobScheduler::ShutDown();
      }
    }
  }
}

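// Run the sequential-chain scenario over the same range of configurations.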
TEST(Moz2D, JobScheduler_Chain) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 8; ++threads) {
    for (uint32_t queues = 1; queues < threads; ++queues) {
      for (uint32_t buffers = 1; buffers < 100; buffers += 3) {
        mozilla::gfx::JobScheduler::Init(threads, queues);
        test_scheduler::TestSchedulerChain(threads, buffers);
        mozilla::gfx::JobScheduler::ShutDown();
      }
    }
  }
}