// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/viz/service/display_embedder/skia_output_device_buffer_queue.h"

#include <stddef.h>
#include <stdint.h>

#include <set>
#include <utility>

#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/test/bind_test_util.h"
#include "build/build_config.h"
#include "gpu/command_buffer/service/scheduler.h"

#include "components/viz/service/display_embedder/skia_output_surface_dependency_impl.h"
#include "components/viz/service/gl/gpu_service_impl.h"
#include "components/viz/test/test_gpu_service_holder.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/display/types/display_snapshot.h"
#include "ui/gl/gl_surface_stub.h"

using ::testing::_;
using ::testing::Expectation;
using ::testing::Ne;
using ::testing::Return;

namespace {

// This macro and the TestOnGpu class make it easier to write tests that run
// on the GPU thread.
// Use TEST_F_GPU instead of TEST_F in the same manner, and in your subclass
// of TestOnGpu implement SetUpOnMain/SetUpOnGpu and
// TearDownOnMain/TearDownOnGpu instead of SetUp and TearDown, respectively.
//
// NOTE: Most likely you need to implement TearDownOnGpu instead of relying on
// the destructor, to ensure that the necessary cleanup happens on the GPU
// thread.
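//
// Example usage (illustrative only; MyGpuTest stands for any subclass of
// TestOnGpu):
//
//   TEST_F_GPU(MyGpuTest, DoesSomethingOnGpu) {
//     // This body becomes TestBodyOnGpu() and runs on the GPU thread.
//   }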

// TODO(vasilyt): Extract this for others to use?

#define GTEST_TEST_GPU_(test_suite_name, test_name, parent_class, parent_id)  \
  class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)                    \
      : public parent_class {                                                 \
   public:                                                                    \
    GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {}                   \
                                                                              \
   private:                                                                   \
    virtual void TestBodyOnGpu();                                             \
    static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;     \
    GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name,   \
                                                           test_name));       \
  };                                                                          \
                                                                              \
  ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name,          \
                                                    test_name)::test_info_ =  \
      ::testing::internal::MakeAndRegisterTestInfo(                           \
          #test_suite_name, #test_name, nullptr, nullptr,                     \
          ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \
          ::testing::internal::SuiteApiResolver<                              \
              parent_class>::GetSetUpCaseOrSuite(__FILE__, __LINE__),         \
          ::testing::internal::SuiteApiResolver<                              \
              parent_class>::GetTearDownCaseOrSuite(__FILE__, __LINE__),      \
          new ::testing::internal::TestFactoryImpl<GTEST_TEST_CLASS_NAME_(    \
              test_suite_name, test_name)>);                                  \
  void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBodyOnGpu()

#define TEST_F_GPU(test_fixture, test_name)              \
  GTEST_TEST_GPU_(test_fixture, test_name, test_fixture, \
                  ::testing::internal::GetTypeId<test_fixture>())

class TestOnGpu : public ::testing::Test {
 protected:
  TestOnGpu()
      : wait_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
              base::WaitableEvent::InitialState::NOT_SIGNALED) {}

  void TestBody() override {
    auto callback =
        base::BindLambdaForTesting([&]() { this->TestBodyOnGpu(); });
    ScheduleGpuTask(std::move(callback));
  }

  void SetUp() override {
    gpu_service_holder_ = viz::TestGpuServiceHolder::GetInstance();
    SetUpOnMain();

    auto setup = base::BindLambdaForTesting([&]() { this->SetUpOnGpu(); });
    ScheduleGpuTask(setup);
  }

  void TearDown() override {
    auto teardown =
        base::BindLambdaForTesting([&]() { this->TearDownOnGpu(); });
    ScheduleGpuTask(teardown);

    TearDownOnMain();
  }

  void CallOnGpuAndUnblockMain(base::OnceClosure callback) {
    DCHECK(!wait_.IsSignaled());
    std::move(callback).Run();
    wait_.Signal();
  }

  void ScheduleGpuTask(base::OnceClosure callback) {
    auto wrap = base::BindOnce(&TestOnGpu::CallOnGpuAndUnblockMain,
                               base::Unretained(this), std::move(callback));
    gpu_service_holder_->ScheduleGpuTask(std::move(wrap));
    wait_.Wait();
  }

  virtual void SetUpOnMain() {}
  virtual void SetUpOnGpu() {}
  virtual void TearDownOnMain() {}
  virtual void TearDownOnGpu() {}
  virtual void TestBodyOnGpu() {}

  viz::TestGpuServiceHolder* gpu_service_holder_;
  base::WaitableEvent wait_;
};

// Here starts the SkiaOutputDeviceBufferQueue test related code.
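// Async GLSurface stub that queues swap completion callbacks and runs the
// oldest one (with SWAP_ACK) only when SwapComplete() is called, so tests can
// control exactly when a page flip finishes.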
class MockGLSurfaceAsync : public gl::GLSurfaceStub {
 public:
  bool SupportsAsyncSwap() override { return true; }

  void SwapBuffersAsync(SwapCompletionCallback completion_callback,
                        PresentationCallback presentation_callback) override {
    callbacks_.push_back(std::move(completion_callback));
  }

  void CommitOverlayPlanesAsync(
      SwapCompletionCallback completion_callback,
      PresentationCallback presentation_callback) override {
    callbacks_.push_back(std::move(completion_callback));
  }

  bool ScheduleOverlayPlane(int z_order,
                            gfx::OverlayTransform transform,
                            gl::GLImage* image,
                            const gfx::Rect& bounds_rect,
                            const gfx::RectF& crop_rect,
                            bool enable_blend,
                            std::unique_ptr<gfx::GpuFence> gpu_fence) override {
    return true;
  }

  gfx::SurfaceOrigin GetOrigin() const override {
    return gfx::SurfaceOrigin::kTopLeft;
  }

  void SwapComplete() {
    DCHECK(!callbacks_.empty());
    std::move(callbacks_.front()).Run(gfx::SwapResult::SWAP_ACK, nullptr);
    callbacks_.pop_front();
  }

 protected:
  ~MockGLSurfaceAsync() override = default;
  base::circular_deque<SwapCompletionCallback> callbacks_;
};
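// Minimal MemoryTracker that only accumulates allocation deltas, so tests can
// assert on GetSize().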
class MemoryTrackerStub : public gpu::MemoryTracker {
 public:
  MemoryTrackerStub() = default;
  MemoryTrackerStub(const MemoryTrackerStub&) = delete;
  MemoryTrackerStub& operator=(const MemoryTrackerStub&) = delete;
  ~MemoryTrackerStub() override { DCHECK(!size_); }

  // MemoryTracker implementation:
  void TrackMemoryAllocatedChange(int64_t delta) override {
    DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
    size_ += delta;
  }

  uint64_t GetSize() const override { return size_; }
  uint64_t ClientTracingId() const override { return client_tracing_id_; }
  int ClientId() const override {
    return gpu::ChannelIdFromCommandBufferId(command_buffer_id_);
  }
  uint64_t ContextGroupTracingId() const override {
    return command_buffer_id_.GetUnsafeValue();
  }

 private:
  gpu::CommandBufferId command_buffer_id_;
  const uint64_t client_tracing_id_ = 0;
  uint64_t size_ = 0;
};

}  // namespace

namespace viz {

class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
 public:
  SkiaOutputDeviceBufferQueueTest() {}

  void SetUpOnMain() override {
    gpu::SurfaceHandle surface_handle_ = gpu::kNullSurfaceHandle;
    dependency_ = std::make_unique<SkiaOutputSurfaceDependencyImpl>(
        gpu_service_holder_->gpu_service(), surface_handle_);
  }

  void SetUpOnGpu() override {
    gl_surface_ = base::MakeRefCounted<MockGLSurfaceAsync>();
    memory_tracker_ = std::make_unique<MemoryTrackerStub>();

    auto present_callback =
        base::DoNothing::Repeatedly<gpu::SwapBuffersCompleteParams,
                                    const gfx::Size&>();

    uint32_t shared_image_usage =
        gpu::SHARED_IMAGE_USAGE_DISPLAY |
        gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT;

    std::unique_ptr<SkiaOutputDeviceBufferQueue> onscreen_device =
        std::make_unique<SkiaOutputDeviceBufferQueue>(
            gl_surface_, dependency_.get(), memory_tracker_.get(),
            present_callback, shared_image_usage);

    output_device_ = std::move(onscreen_device);
  }

  void TearDownOnGpu() override { output_device_.reset(); }

  using Image = SkiaOutputDeviceBufferQueue::Image;

  const std::vector<std::unique_ptr<Image>>& images() {
    return output_device_->images_;
  }

  Image* current_image() { return output_device_->current_image_; }

  const base::circular_deque<Image*>& available_images() {
    return output_device_->available_images_;
  }

  Image* submitted_image() { return output_device_->submitted_image_; }

  Image* displayed_image() { return output_device_->displayed_image_; }

  base::circular_deque<std::unique_ptr<
      SkiaOutputDeviceBufferQueue::CancelableSwapCompletionCallback>>&
  swap_completion_callbacks() {
    return output_device_->swap_completion_callbacks_;
  }

  const gpu::MemoryTracker& memory_tracker() { return *memory_tracker_; }

  int CountBuffers() {
    int n = available_images().size() + swap_completion_callbacks().size();

    if (displayed_image())
      n++;
    if (current_image())
      n++;
    return n;
  }

  void CheckUnique() {
    std::set<Image*> images;
    for (auto* image : available_images())
      images.insert(image);

    if (displayed_image())
      images.insert(displayed_image());

    if (current_image())
      images.insert(current_image());

    EXPECT_EQ(images.size() + swap_completion_callbacks().size(),
              (size_t)CountBuffers());
  }

  Image* PaintAndSchedulePrimaryPlane() {
    // Call Begin/EndPaint to ensure the image is initialized before use.
    std::vector<GrBackendSemaphore> end_semaphores;
    output_device_->BeginPaint(&end_semaphores);
    output_device_->EndPaint();
    SchedulePrimaryPlane();
    return current_image();
  }

  void SchedulePrimaryPlane() {
    output_device_->SchedulePrimaryPlane(
        OverlayProcessorInterface::OutputSurfaceOverlayPlane());
  }

  void SwapBuffers() {
    auto present_callback =
        base::DoNothing::Once<const gfx::PresentationFeedback&>();

    output_device_->SwapBuffers(std::move(present_callback),
                                std::vector<ui::LatencyInfo>());
  }

  void CommitOverlayPlanes() {
    auto present_callback =
        base::DoNothing::Once<const gfx::PresentationFeedback&>();

    output_device_->CommitOverlayPlanes(std::move(present_callback),
                                        std::vector<ui::LatencyInfo>());
  }

  void PageFlipComplete() { gl_surface_->SwapComplete(); }

 protected:
  std::unique_ptr<SkiaOutputSurfaceDependency> dependency_;
  scoped_refptr<MockGLSurfaceAsync> gl_surface_;
  std::unique_ptr<MemoryTrackerStub> memory_tracker_;
  std::unique_ptr<SkiaOutputDeviceBufferQueue> output_device_;
};

namespace {

const gfx::Size screen_size = gfx::Size(30, 30);

const gfx::BufferFormat kDefaultFormat = gfx::BufferFormat::RGBA_8888;

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, MultipleGetCurrentBufferCalls) {
  // Check that multiple bind calls do not create or change surfaces.

  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  auto* fb = current_image();
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  EXPECT_EQ(fb, current_image());
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
  // Check buffer flow through double buffering path.
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());

  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  EXPECT_NE(current_image(), nullptr);
  EXPECT_FALSE(displayed_image());
  SwapBuffers();
  EXPECT_EQ(1U, swap_completion_callbacks().size());
  PageFlipComplete();
  EXPECT_EQ(0U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  CheckUnique();
  EXPECT_NE(current_image(), nullptr);
  EXPECT_EQ(0U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());
  SwapBuffers();
  CheckUnique();
  EXPECT_EQ(1U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());

  PageFlipComplete();
  CheckUnique();
  EXPECT_EQ(0U, swap_completion_callbacks().size());
  EXPECT_EQ(2U, available_images().size());
  EXPECT_TRUE(displayed_image());
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  CheckUnique();
  EXPECT_EQ(1u, available_images().size());
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckTripleBuffering) {
  // Check buffer flow through triple buffering path.
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);
  EXPECT_NE(0U, memory_tracker().GetSize());

  // This bit is the same sequence tested in the double-buffering case.
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_FALSE(displayed_image());
  SwapBuffers();
  PageFlipComplete();
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  SwapBuffers();

  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  CheckUnique();
  EXPECT_EQ(1U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  CheckUnique();
  EXPECT_NE(current_image(), nullptr);
  EXPECT_EQ(1U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());
  PageFlipComplete();
  EXPECT_EQ(3, CountBuffers());
  CheckUnique();
  EXPECT_NE(current_image(), nullptr);
  EXPECT_EQ(0U, swap_completion_callbacks().size());
  EXPECT_TRUE(displayed_image());
  EXPECT_EQ(1U, available_images().size());
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckEmptySwap) {
  // Check empty swap flow, in which the damage is empty and BindFramebuffer
  // might not be called.
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);

  EXPECT_EQ(3, CountBuffers());
  EXPECT_NE(0U, memory_tracker().GetSize());
  auto* image = PaintAndSchedulePrimaryPlane();
  EXPECT_NE(image, nullptr);
  EXPECT_NE(0U, memory_tracker().GetSize());
  EXPECT_EQ(3, CountBuffers());
  EXPECT_NE(current_image(), nullptr);
  EXPECT_FALSE(displayed_image());

  SwapBuffers();
  // Make sure we won't be drawing to the texture we just sent for scanout.
  auto* new_image = PaintAndSchedulePrimaryPlane();
  EXPECT_NE(new_image, nullptr);
  EXPECT_NE(image, new_image);

  EXPECT_EQ(1U, swap_completion_callbacks().size());
  PageFlipComplete();

  // Test CommitOverlayPlanes without calling BeginPaint/EndPaint (i.e. without
  // PaintAndSchedulePrimaryPlane).
  SwapBuffers();
  EXPECT_EQ(1U, swap_completion_callbacks().size());

  // Schedule the primary plane without drawing.
  SchedulePrimaryPlane();

  PageFlipComplete();
  EXPECT_EQ(0U, swap_completion_callbacks().size());

  EXPECT_EQ(current_image(), nullptr);
  CommitOverlayPlanes();
  EXPECT_EQ(1U, swap_completion_callbacks().size());
  PageFlipComplete();
  EXPECT_EQ(0U, swap_completion_callbacks().size());
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckCorrectBufferOrdering) {
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);
  const size_t kSwapCount = 5;

  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  for (size_t i = 0; i < kSwapCount; ++i) {
    SwapBuffers();
    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
    PageFlipComplete();
  }

  // Note: this must be three (the number of buffers in the queue), not
  // kSwapCount.
  EXPECT_EQ(3, CountBuffers());

  for (size_t i = 0; i < kSwapCount; ++i) {
    auto* next_image = current_image();
    SwapBuffers();
    EXPECT_EQ(current_image(), nullptr);
    EXPECT_EQ(1U, swap_completion_callbacks().size());
    PageFlipComplete();
    EXPECT_EQ(displayed_image(), next_image);
    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  }
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, ReshapeWithInFlightSurfaces) {
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);

  const size_t kSwapCount = 5;

  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  for (size_t i = 0; i < kSwapCount; ++i) {
    SwapBuffers();
    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
    PageFlipComplete();
  }

  SwapBuffers();

  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);

  // Swap completion callbacks should not be cleared.
  EXPECT_EQ(1u, swap_completion_callbacks().size());

  PageFlipComplete();
  EXPECT_FALSE(displayed_image());

  // The dummy surfaces left should be discarded.
  EXPECT_EQ(3u, available_images().size());

  // Test swap after reshape.
  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  SwapBuffers();
  PageFlipComplete();
  EXPECT_NE(displayed_image(), nullptr);
}

TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, BufferIsInOrder) {
  output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
                          gfx::OVERLAY_TRANSFORM_NONE);
  EXPECT_EQ(3u, available_images().size());

  int current_index = -1;
  int submitted_index = -1;
  int displayed_index = -1;

  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
  ++current_index;
  EXPECT_EQ(current_image(), images()[current_index % 3].get());
  EXPECT_EQ(submitted_image(), submitted_index < 0
                                   ? nullptr
                                   : images()[submitted_index % 3].get());
  EXPECT_EQ(displayed_image(), displayed_index < 0
                                   ? nullptr
                                   : images()[displayed_index % 3].get());

  SwapBuffers();
  ++submitted_index;
  EXPECT_EQ(current_image(), nullptr);
  EXPECT_EQ(submitted_image(), submitted_index < 0
                                   ? nullptr
                                   : images()[submitted_index % 3].get());
  EXPECT_EQ(displayed_image(), displayed_index < 0
                                   ? nullptr
                                   : images()[displayed_index % 3].get());

  const size_t kSwapCount = 10;
  for (size_t i = 0; i < kSwapCount; ++i) {
    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
    ++current_index;
    EXPECT_EQ(current_image(), images()[current_index % 3].get());
    EXPECT_EQ(submitted_image(), submitted_index < 0
                                     ? nullptr
                                     : images()[submitted_index % 3].get());
    EXPECT_EQ(displayed_image(), displayed_index < 0
                                     ? nullptr
                                     : images()[displayed_index % 3].get());

    SwapBuffers();
    ++submitted_index;
    EXPECT_EQ(current_image(), nullptr);
    EXPECT_EQ(submitted_image(), submitted_index < 0
                                     ? nullptr
                                     : images()[submitted_index % 3].get());
    EXPECT_EQ(displayed_image(), displayed_index < 0
                                     ? nullptr
                                     : images()[displayed_index % 3].get());

    PageFlipComplete();
    ++displayed_index;
    EXPECT_EQ(current_image(), nullptr);
    EXPECT_EQ(submitted_image(), submitted_index < 0
                                     ? nullptr
                                     : images()[submitted_index % 3].get());
    EXPECT_EQ(displayed_image(), displayed_index < 0
                                     ? nullptr
                                     : images()[displayed_index % 3].get());
  }

  PageFlipComplete();
  ++displayed_index;
  EXPECT_EQ(current_image(), nullptr);
  EXPECT_EQ(submitted_image(), submitted_index < 0
                                   ? nullptr
                                   : images()[submitted_index % 3].get());
  EXPECT_EQ(displayed_image(), displayed_index < 0
                                   ? nullptr
                                   : images()[displayed_index % 3].get());
}

}  // namespace
}  // namespace viz