1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "components/viz/service/display/display.h"
6 
7 #include <stddef.h>
8 #include <algorithm>
9 #include <limits>
10 #include <utility>
11 
12 #include "base/debug/dump_without_crashing.h"
13 #include "base/metrics/histogram_macros.h"
14 #include "base/optional.h"
15 #include "base/stl_util.h"
16 #include "base/timer/elapsed_timer.h"
17 #include "base/trace_event/trace_event.h"
18 #include "build/build_config.h"
19 #include "cc/base/region.h"
20 #include "cc/base/simple_enclosed_region.h"
21 #include "cc/benchmarks/benchmark_instrumentation.h"
22 #include "components/viz/common/display/renderer_settings.h"
23 #include "components/viz/common/features.h"
24 #include "components/viz/common/frame_sinks/begin_frame_source.h"
25 #include "components/viz/common/quads/compositor_frame.h"
26 #include "components/viz/common/quads/draw_quad.h"
27 #include "components/viz/common/quads/shared_quad_state.h"
28 #include "components/viz/common/viz_utils.h"
29 #include "components/viz/service/display/aggregated_frame.h"
30 #include "components/viz/service/display/damage_frame_annotator.h"
31 #include "components/viz/service/display/direct_renderer.h"
32 #include "components/viz/service/display/display_client.h"
33 #include "components/viz/service/display/display_scheduler.h"
34 #include "components/viz/service/display/gl_renderer.h"
35 #include "components/viz/service/display/output_surface.h"
36 #include "components/viz/service/display/renderer_utils.h"
37 #include "components/viz/service/display/skia_output_surface.h"
38 #include "components/viz/service/display/skia_renderer.h"
39 #include "components/viz/service/display/software_renderer.h"
40 #include "components/viz/service/display/surface_aggregator.h"
41 #include "components/viz/service/surfaces/surface.h"
42 #include "components/viz/service/surfaces/surface_manager.h"
43 #include "gpu/command_buffer/client/context_support.h"
44 #include "gpu/command_buffer/client/gles2_interface.h"
45 #include "gpu/ipc/scheduler_sequence.h"
46 #include "services/viz/public/mojom/compositing/compositor_frame_sink.mojom.h"
47 #include "third_party/perfetto/protos/perfetto/trace/track_event/chrome_latency_info.pbzero.h"
48 #include "ui/gfx/buffer_types.h"
49 #include "ui/gfx/geometry/rect_conversions.h"
50 #include "ui/gfx/overlay_transform_utils.h"
51 #include "ui/gfx/presentation_feedback.h"
52 #include "ui/gfx/swap_result.h"
53 
54 #if defined(OS_ANDROID)
55 #include "ui/gfx/android/android_surface_control_compat.h"
56 #endif
57 namespace viz {
58 
59 namespace {
60 
// Recorded via UMA_HISTOGRAM_ENUMERATION (see DrawAndSwap) to note whether an
// aggregated frame contained video. Entries are persisted to metrics logs; do
// not renumber or reuse values.
enum class TypeOfVideoInFrame {
  kNoVideo = 0,
  kVideo = 1,

  // This should be the last entry/largest value above.
  kMaxValue = kVideo,
};
68 
// DrawQuad materials that must never be split by the overdraw-avoidance
// logic in CanSplitQuad() below.
const DrawQuad::Material kNonSplittableMaterials[] = {
    // Exclude debug quads from quad splitting
    DrawQuad::Material::kDebugBorder,
    // Exclude possible overlay candidates from quad splitting
    // See OverlayCandidate::FromDrawQuad
    DrawQuad::Material::kStreamVideoContent,
    DrawQuad::Material::kTextureContent,
    DrawQuad::Material::kVideoHole,
    // See DCLayerOverlayProcessor::ProcessRenderPass
    DrawQuad::Material::kYuvVideoContent,
};
80 
// How far in the future (relative to base::TimeTicks::Now()) a presentation
// timestamp may be and still be accepted by SanitizePresentationFeedback().
// Only applies to feedbacks flagged kHWClock or kVSync, whose timestamps come
// from a different clock source or were snapped to a vsync.
constexpr base::TimeDelta kAllowedDeltaFromFuture =
    base::TimeDelta::FromMilliseconds(16);
83 
// Hands each Display instance a distinct starting value for the display-trace
// id, so that multiple Displays don't all start at 0 — identical ids would
// make it difficult to associate trace events with a particular display.
int64_t GetStartingTraceId() {
  static int64_t next_client = 0;
  ++next_client;
  // Keep only the low 16 bits of the counter before shifting into the upper
  // half of the id space. https://crbug.com/956695
  const int64_t masked_client = next_client & 0xffff;
  return masked_client << 16;
}
92 
// Validates |feedback|'s timestamp against |draw_time| and the current time,
// converting clearly-bogus feedbacks into Failure() so they don't pollute
// metrics. Temporary instrumentation to investigate large presentation times.
// https://crbug.com/894440
gfx::PresentationFeedback SanitizePresentationFeedback(
    const gfx::PresentationFeedback& feedback,
    base::TimeTicks draw_time) {
  DCHECK(!draw_time.is_null());
  // A null timestamp carries no presentation time; pass it through untouched.
  if (feedback.timestamp.is_null())
    return feedback;

  // If the presentation-timestamp is from the future, or from the past (i.e.
  // before swap-time), then invalidate the feedback. Also report how far into
  // the future (or from the past) the timestamps are.
  // https://crbug.com/894440
  const auto now = base::TimeTicks::Now();
  // The timestamp for the presentation feedback may have a different source
  // and therefore the timestamp can be slightly in the future in comparison
  // with base::TimeTicks::Now(). Such presentation feedbacks should not be
  // rejected. See https://crbug.com/1040178
  // Sometimes we snap the feedback's timestamp to the nearest vsync, and that
  // can be offset by one vsync interval. Such feedbacks have kVSync set.
  const auto allowed_delta_from_future =
      ((feedback.flags & (gfx::PresentationFeedback::kHWClock |
                          gfx::PresentationFeedback::kVSync)) != 0)
          ? kAllowedDeltaFromFuture
          : base::TimeDelta();
  if (feedback.timestamp > now + allowed_delta_from_future) {
    const auto diff = feedback.timestamp - now;
    UMA_HISTOGRAM_MEDIUM_TIMES(
        "Graphics.PresentationTimestamp.InvalidFromFuture", diff);
    return gfx::PresentationFeedback::Failure();
  }

  // Presentation before the draw even started is impossible; reject.
  if (feedback.timestamp < draw_time) {
    const auto diff = draw_time - feedback.timestamp;
    UMA_HISTOGRAM_MEDIUM_TIMES(
        "Graphics.PresentationTimestamp.InvalidBeforeSwap", diff);
    return gfx::PresentationFeedback::Failure();
  }

  // Plausible but unusually large draw-to-presentation deltas are recorded
  // for investigation, not rejected.
  const auto difference = feedback.timestamp - draw_time;
  if (difference.InMinutes() > 3) {
    UMA_HISTOGRAM_CUSTOM_TIMES(
        "Graphics.PresentationTimestamp.LargePresentationDelta", difference,
        base::TimeDelta::FromMinutes(3), base::TimeDelta::FromHours(1), 50);
  }
  return feedback;
}
140 
141 // Returns the bounds for the largest rect that can be inscribed in a rounded
142 // rect.
GetOccludingRectForRRectF(const gfx::RRectF & bounds)143 gfx::RectF GetOccludingRectForRRectF(const gfx::RRectF& bounds) {
144   if (bounds.IsEmpty())
145     return gfx::RectF();
146   if (bounds.GetType() == gfx::RRectF::Type::kRect)
147     return bounds.rect();
148   gfx::RectF occluding_rect = bounds.rect();
149 
150   // Compute the radius for each corner
151   float top_left = bounds.GetCornerRadii(gfx::RRectF::Corner::kUpperLeft).x();
152   float top_right = bounds.GetCornerRadii(gfx::RRectF::Corner::kUpperRight).x();
153   float lower_right =
154       bounds.GetCornerRadii(gfx::RRectF::Corner::kLowerRight).x();
155   float lower_left = bounds.GetCornerRadii(gfx::RRectF::Corner::kLowerLeft).x();
156 
157   // Get a bounding rect that does not intersect with the rounding clip.
158   // When a rect has rounded corner with radius r, then the largest rect that
159   // can be inscribed inside it has an inset of |((2 - sqrt(2)) / 2) * radius|.
160   occluding_rect.Inset(std::max(top_left, lower_left) * 0.3f,
161                        std::max(top_left, top_right) * 0.3f,
162                        std::max(top_right, lower_right) * 0.3f,
163                        std::max(lower_right, lower_left) * 0.3f);
164   return occluding_rect;
165 }
166 
167 // SkRegion uses INT_MAX as a sentinel. Reduce gfx::Rect values when they are
168 // equal to INT_MAX to prevent conversion to an empty region.
SafeConvertRectForRegion(const gfx::Rect & r)169 gfx::Rect SafeConvertRectForRegion(const gfx::Rect& r) {
170   gfx::Rect safe_rect(r);
171   if (safe_rect.x() == INT_MAX)
172     safe_rect.set_x(INT_MAX - 1);
173   if (safe_rect.y() == INT_MAX)
174     safe_rect.set_y(INT_MAX - 1);
175   if (safe_rect.width() == INT_MAX)
176     safe_rect.set_width(INT_MAX - 1);
177   if (safe_rect.height() == INT_MAX)
178     safe_rect.set_height(INT_MAX - 1);
179   return safe_rect;
180 }
181 
182 // Decides whether or not a DrawQuad should be split into a more complex visible
183 // region in order to avoid overdraw.
CanSplitQuad(const DrawQuad::Material m,const std::vector<gfx::Rect> & visible_region_rects,const gfx::Size & visible_region_bounding_size,int minimum_fragments_reduced,const float device_scale_factor)184 bool CanSplitQuad(const DrawQuad::Material m,
185                   const std::vector<gfx::Rect>& visible_region_rects,
186                   const gfx::Size& visible_region_bounding_size,
187                   int minimum_fragments_reduced,
188                   const float device_scale_factor) {
189   if (base::Contains(kNonSplittableMaterials, m))
190     return false;
191 
192   base::CheckedNumeric<int> area = 0;
193   for (const auto& r : visible_region_rects) {
194     area += r.size().GetCheckedArea();
195     // In calculations below, assume false if this addition overflows.
196     if (!area.IsValid()) {
197       return false;
198     }
199   }
200 
201   base::CheckedNumeric<int> visible_region_bounding_area =
202       visible_region_bounding_size.GetCheckedArea();
203   if (!visible_region_bounding_area.IsValid()) {
204     // In calculations below, assume true if this overflows.
205     return true;
206   }
207 
208   area = visible_region_bounding_area - area;
209   if (!area.IsValid()) {
210     // In calculations below, assume false if this subtraction underflows.
211     return false;
212   }
213 
214   int int_area = area.ValueOrDie();
215   return int_area * device_scale_factor * device_scale_factor >
216          minimum_fragments_reduced;
217 }
218 
219 // Attempts to consolidate rectangles that were only split because of the
220 // nature of base::Region and transforms the region into a list of visible
221 // rectangles. Returns true upon successful reduction of the region to under
222 // |complexity_limit|, false otherwise.
ReduceComplexity(const cc::Region & region,size_t complexity_limit,std::vector<gfx::Rect> * reduced_region)223 bool ReduceComplexity(const cc::Region& region,
224                       size_t complexity_limit,
225                       std::vector<gfx::Rect>* reduced_region) {
226   DCHECK(reduced_region);
227 
228   reduced_region->clear();
229   for (const gfx::Rect& r : region) {
230     auto it =
231         std::find_if(reduced_region->begin(), reduced_region->end(),
232                      [&r](const gfx::Rect& a) { return a.SharesEdgeWith(r); });
233     if (it != reduced_region->end()) {
234       it->Union(r);
235       continue;
236     }
237     reduced_region->push_back(r);
238 
239     if (reduced_region->size() >= complexity_limit)
240       return false;
241   }
242   return true;
243 }
244 
// Whether the platform's output surface can honor frame-rate hints.
bool SupportsSetFrameRate(const OutputSurface* output_surface) {
#if defined(OS_ANDROID)
  return output_surface->capabilities().supports_surfaceless &&
         gfx::SurfaceControl::SupportsSetFrameRate();
#elif defined(OS_WIN)
  return output_surface->capabilities().supports_dc_layers &&
         features::ShouldUseSetPresentDuration();
#else
  return false;
#endif
}
255 
256 }  // namespace
257 
// Out-of-line definitions for the static constexpr draw-to-swap histogram
// bounds declared in display.h (needed for ODR-use before C++17 inline
// variables).
constexpr base::TimeDelta Display::kDrawToSwapMin;
constexpr base::TimeDelta Display::kDrawToSwapMax;
260 
// The compiler-generated special members suffice for this timing-bookkeeping
// type; only move construction is declared here.
Display::PresentationGroupTiming::PresentationGroupTiming() = default;

Display::PresentationGroupTiming::PresentationGroupTiming(
    Display::PresentationGroupTiming&& other) = default;

Display::PresentationGroupTiming::~PresentationGroupTiming() = default;
267 
// Takes ownership of a surface's PresentationHelper so it can be notified of
// the final presentation via OnPresent().
void Display::PresentationGroupTiming::AddPresentationHelper(
    std::unique_ptr<Surface::PresentationHelper> helper) {
  presentation_helpers_.push_back(std::move(helper));
}
272 
// Records when drawing for this presentation group started; forwarded to the
// helpers in OnPresent().
void Display::PresentationGroupTiming::OnDraw(
    base::TimeTicks draw_start_timestamp) {
  draw_start_timestamp_ = draw_start_timestamp;
}
277 
// Records the swap timings for this presentation group; forwarded to the
// helpers in OnPresent().
void Display::PresentationGroupTiming::OnSwap(gfx::SwapTimings timings) {
  swap_timings_ = timings;
}
281 
// Delivers the final presentation feedback — together with the previously
// recorded draw start time and swap timings — to every registered helper.
void Display::PresentationGroupTiming::OnPresent(
    const gfx::PresentationFeedback& feedback) {
  for (auto& presentation_helper : presentation_helpers_) {
    presentation_helper->DidPresent(draw_start_timestamp_, swap_timings_,
                                    feedback);
  }
}
289 
// Constructs a Display for |frame_sink_id| drawing to |output_surface|.
// |scheduler| may be null; when present, this Display registers itself as
// the scheduler's client. Renderer/aggregator creation is deferred to
// Initialize().
Display::Display(
    SharedBitmapManager* bitmap_manager,
    const RendererSettings& settings,
    const DebugRendererSettings* debug_settings,
    const FrameSinkId& frame_sink_id,
    std::unique_ptr<DisplayCompositorMemoryAndTaskController> gpu_dependency,
    std::unique_ptr<OutputSurface> output_surface,
    std::unique_ptr<OverlayProcessorInterface> overlay_processor,
    std::unique_ptr<DisplaySchedulerBase> scheduler,
    scoped_refptr<base::SingleThreadTaskRunner> current_task_runner)
    : bitmap_manager_(bitmap_manager),
      settings_(settings),
      debug_settings_(debug_settings),
      frame_sink_id_(frame_sink_id),
      gpu_dependency_(std::move(gpu_dependency)),
      output_surface_(std::move(output_surface)),
      skia_output_surface_(output_surface_->AsSkiaOutputSurface()),
      scheduler_(std::move(scheduler)),
      current_task_runner_(std::move(current_task_runner)),
      overlay_processor_(std::move(overlay_processor)),
      // All trace-id counters start from a per-Display base value so trace
      // events from different Displays don't collide.
      swapped_trace_id_(GetStartingTraceId()),
      last_swap_ack_trace_id_(swapped_trace_id_),
      last_presented_trace_id_(swapped_trace_id_) {
  DCHECK(output_surface_);
  DCHECK(frame_sink_id_.is_valid());
  if (scheduler_)
    scheduler_->SetClient(this);
}
318 
// Tears down the Display. The ordering below is deliberate: GPU access is
// re-enabled for cleanup, outstanding callbacks are flushed, observers are
// notified, and the scheduler is detached before members are destroyed.
Display::~Display() {
#if DCHECK_IS_ON()
  // Teardown may schedule work on the GPU thread; explicitly allow that for
  // the remainder of destruction.
  allow_schedule_gpu_task_during_destruction_.reset(
      new gpu::ScopedAllowScheduleGpuTask);
#endif
  // Resource cleanup below may need to reach the GPU thread.
  if (resource_provider_) {
    resource_provider_->SetAllowAccessToGPUThread(true);
  }
#if defined(OS_ANDROID)
  // In certain cases, drivers hang when tearing down the display. Finishing
  // before teardown appears to address this. As we're during display teardown,
  // an additional finish should have minimal impact.
  // TODO(ericrk): Add a more robust workaround. crbug.com/899705
  if (auto* context = output_surface_->context_provider()) {
    context->ContextGL()->Finish();
  }
#endif

  // A DisableSwapUntilResize() callback that never ran must still be invoked.
  if (no_pending_swaps_callback_)
    std::move(no_pending_swaps_callback_).Run();

  for (auto& observer : observers_)
    observer.OnDisplayDestroyed();
  observers_.Clear();

  // Send gfx::PresentationFeedback::Failure() to any surfaces expecting
  // feedback.
  pending_presentation_group_timings_.clear();

  // Only do this if Initialize() happened.
  if (client_) {
    if (auto* context = output_surface_->context_provider())
      context->RemoveObserver(this);
    if (skia_output_surface_)
      skia_output_surface_->RemoveContextLostObserver(this);
  }

  // Un-register as DisplaySchedulerClient to prevent us from being called in a
  // partially destructed state.
  if (scheduler_)
    scheduler_->SetClient(nullptr);

  // Flush any draw callbacks still held by the damage tracker.
  if (damage_tracker_)
    damage_tracker_->RunDrawCallbacks();
}
364 
// Completes setup once the client and SurfaceManager are known: binds the
// output surface, creates the FrameRateDecider, builds the renderer and
// aggregator (via InitializeRenderer()), creates the damage tracker, and
// registers for context-loss notifications.
void Display::Initialize(DisplayClient* client,
                         SurfaceManager* surface_manager,
                         bool enable_shared_images,
                         bool hw_support_for_multiple_refresh_rates,
                         size_t num_of_frames_to_toggle_interval) {
  DCHECK(client);
  DCHECK(surface_manager);
  gpu::ScopedAllowScheduleGpuTask allow_schedule_gpu_task;
  client_ = client;
  surface_manager_ = surface_manager;

  output_surface_->BindToClient(this);
  // Software output devices need their own client binding.
  if (output_surface_->software_device())
    output_surface_->software_device()->BindToClient(this);

  frame_rate_decider_ = std::make_unique<FrameRateDecider>(
      surface_manager_, this, hw_support_for_multiple_refresh_rates,
      SupportsSetFrameRate(output_surface_.get()),
      num_of_frames_to_toggle_interval);

  InitializeRenderer(enable_shared_images);

  // The damage tracker requires the aggregator created just above.
  damage_tracker_ = std::make_unique<DisplayDamageTracker>(surface_manager_,
                                                           aggregator_.get());
  if (scheduler_)
    scheduler_->SetDamageTracker(damage_tracker_.get());

  // This depends on assumptions that Display::Initialize will happen on the
  // same callstack as the ContextProvider being created/initialized or else
  // it could miss a callback before setting this.
  if (auto* context = output_surface_->context_provider())
    context->AddObserver(this);

  if (skia_output_surface_)
    skia_output_surface_->AddContextLostObserver(this);
}
401 
// Registers |observer| for display lifetime notifications (e.g.
// OnDisplayDestroyed()).
void Display::AddObserver(DisplayObserver* observer) {
  observers_.AddObserver(observer);
}
405 
// Unregisters an observer previously added with AddObserver().
void Display::RemoveObserver(DisplayObserver* observer) {
  observers_.RemoveObserver(observer);
}
409 
SetLocalSurfaceId(const LocalSurfaceId & id,float device_scale_factor)410 void Display::SetLocalSurfaceId(const LocalSurfaceId& id,
411                                 float device_scale_factor) {
412   if (current_surface_id_.local_surface_id() == id &&
413       device_scale_factor_ == device_scale_factor) {
414     return;
415   }
416 
417   TRACE_EVENT0("viz", "Display::SetSurfaceId");
418   current_surface_id_ = SurfaceId(frame_sink_id_, id);
419   device_scale_factor_ = device_scale_factor;
420 
421   damage_tracker_->SetNewRootSurface(current_surface_id_);
422 }
423 
SetVisible(bool visible)424 void Display::SetVisible(bool visible) {
425   TRACE_EVENT1("viz", "Display::SetVisible", "visible", visible);
426   if (renderer_)
427     renderer_->SetVisible(visible);
428   if (scheduler_)
429     scheduler_->SetVisible(visible);
430   visible_ = visible;
431 
432   if (!visible) {
433     // Damage tracker needs a full reset as renderer resources are dropped when
434     // not visible.
435     if (aggregator_ && current_surface_id_.is_valid())
436       aggregator_->SetFullDamageForSurface(current_surface_id_);
437   }
438 }
439 
// Updates the display size, re-enabling swaps if they were disabled via
// DisableSwapUntilResize(). A call with an unchanged size only re-enables
// swaps.
void Display::Resize(const gfx::Size& size) {
  disable_swap_until_resize_ = false;

  if (size == current_surface_size_)
    return;

  // This DCHECK should probably go at the top of the function, but mac
  // sometimes calls Resize() with 0x0 before it sets a real size. This will
  // early out before the DCHECK fails.
  DCHECK(!size.IsEmpty());
  TRACE_EVENT0("viz", "Display::Resize");

  swapped_since_resize_ = false;
  current_surface_size_ = size;

  damage_tracker_->DisplayResized();
}
457 
// Stops swapping until the next Resize(). If |no_pending_swaps_callback| is
// provided, it runs once all already-submitted swaps have drained —
// immediately if nothing is pending or the output surface has no GPU backing
// to report swap completion.
void Display::DisableSwapUntilResize(
    base::OnceClosure no_pending_swaps_callback) {
  TRACE_EVENT0("viz", "Display::DisableSwapUntilResize");
  DCHECK(no_pending_swaps_callback_.is_null());

  if (!disable_swap_until_resize_) {
    DCHECK(scheduler_);

    // Try to push one last frame out at the current size before disabling.
    if (!swapped_since_resize_)
      scheduler_->ForceImmediateSwapIfPossible();

    // Hold on to the callback until pending swaps complete, but only for
    // GPU-backed output surfaces (GL context or Skia) that will deliver the
    // completion signal.
    if (no_pending_swaps_callback && pending_swaps_ > 0 &&
        (output_surface_->context_provider() ||
         output_surface_->AsSkiaOutputSurface())) {
      no_pending_swaps_callback_ = std::move(no_pending_swaps_callback);
    }

    disable_swap_until_resize_ = true;
  }

  // If the callback wasn't stored above, there are no pending swaps to wait
  // for (it is null here when it was moved into |no_pending_swaps_callback_|),
  // so run it immediately.
  if (no_pending_swaps_callback)
    std::move(no_pending_swaps_callback).Run();
}
482 
SetColorMatrix(const SkMatrix44 & matrix)483 void Display::SetColorMatrix(const SkMatrix44& matrix) {
484   if (output_surface_)
485     output_surface_->set_color_matrix(matrix);
486 
487   // Force a redraw.
488   if (aggregator_) {
489     if (current_surface_id_.is_valid())
490       aggregator_->SetFullDamageForSurface(current_surface_id_);
491   }
492 
493   damage_tracker_->SetRootSurfaceDamaged();
494 }
495 
// Stores the display's color spaces and, if the aggregator already exists,
// forwards them so subsequent aggregations use the new values.
void Display::SetDisplayColorSpaces(
    const gfx::DisplayColorSpaces& display_color_spaces) {
  display_color_spaces_ = display_color_spaces;
  if (aggregator_)
    aggregator_->SetDisplayColorSpaces(display_color_spaces_);
}
502 
SetOutputIsSecure(bool secure)503 void Display::SetOutputIsSecure(bool secure) {
504   if (secure == output_is_secure_)
505     return;
506   output_is_secure_ = secure;
507 
508   if (aggregator_) {
509     aggregator_->set_output_is_secure(secure);
510     // Force a redraw.
511     if (current_surface_id_.is_valid())
512       aggregator_->SetFullDamageForSurface(current_surface_id_);
513   }
514 }
515 
// Builds the rendering pipeline: the DisplayResourceProvider, a
// DirectRenderer implementation matching the output surface (Skia, then GL,
// then software), and the SurfaceAggregator, then pushes current display
// state into the aggregator.
void Display::InitializeRenderer(bool enable_shared_images) {
  // GPU-backed resources are used whenever a GL context or Skia output
  // surface exists, or when the output surface skips draws entirely.
  bool uses_gpu_resources = output_surface_->context_provider() ||
                            skia_output_surface_ ||
                            output_surface_->capabilities().skips_draw;

  resource_provider_ = std::make_unique<DisplayResourceProvider>(
      uses_gpu_resources ? DisplayResourceProvider::kGpu
                         : DisplayResourceProvider::kSoftware,
      output_surface_->context_provider(), bitmap_manager_,
      enable_shared_images);
  if (skia_output_surface_) {
    renderer_ = std::make_unique<SkiaRenderer>(
        &settings_, debug_settings_, output_surface_.get(),
        resource_provider_.get(), overlay_processor_.get(),
        skia_output_surface_);
  } else if (output_surface_->context_provider()) {
    renderer_ = std::make_unique<GLRenderer>(
        &settings_, debug_settings_, output_surface_.get(),
        resource_provider_.get(), overlay_processor_.get(),
        current_task_runner_);
  } else {
    DCHECK(!overlay_processor_->IsOverlaySupported());
    // Keep a raw pointer to the SoftwareRenderer before ownership moves into
    // |renderer_|.
    auto renderer = std::make_unique<SoftwareRenderer>(
        &settings_, debug_settings_, output_surface_.get(),
        resource_provider_.get(), overlay_processor_.get());
    software_renderer_ = renderer.get();
    renderer_ = std::move(renderer);
  }

  renderer_->Initialize();
  renderer_->SetVisible(visible_);

  // Outputting a partial list of quads might not work in cases where contents
  // outside the damage rect might be needed by the renderer.
  bool might_invalidate_outside_damage =
      !output_surface_->capabilities().only_invalidates_damage_rect ||
      overlay_processor_->IsOverlaySupported();
  bool output_partial_list =
      renderer_->use_partial_swap() &&
      (!might_invalidate_outside_damage ||
       output_surface_->capabilities().supports_target_damage);

  aggregator_ = std::make_unique<SurfaceAggregator>(
      surface_manager_, resource_provider_.get(), output_partial_list,
      overlay_processor_->NeedsSurfaceDamageRectList());

  // Propagate display state set before the aggregator existed.
  aggregator_->set_output_is_secure(output_is_secure_);
  aggregator_->SetDisplayColorSpaces(display_color_spaces_);
  aggregator_->SetMaxRenderTargetSize(
      output_surface_->capabilities().max_render_target_size);
}
567 
// Delegates to the damage tracker: true when no frame is available for the
// root surface.
bool Display::IsRootFrameMissing() const {
  return damage_tracker_->root_frame_missing();
}
571 
// Delegates to the damage tracker: whether any surfaces still have pending
// frames for the given BeginFrame.
bool Display::HasPendingSurfaces(const BeginFrameArgs& args) const {
  return damage_tracker_->HasPendingSurfaces(args);
}
575 
// ContextLostObserver implementation: informs the scheduler, then the client,
// that the output surface was lost.
void Display::OnContextLost() {
  if (scheduler_)
    scheduler_->OutputSurfaceLost();
  // WARNING: The client may delete the Display in this method call. Do not
  // make any additional references to members after this call.
  client_->DisplayOutputSurfaceLost();
}
583 
DrawAndSwap(base::TimeTicks expected_display_time)584 bool Display::DrawAndSwap(base::TimeTicks expected_display_time) {
585   TRACE_EVENT0("viz", "Display::DrawAndSwap");
586   if (debug_settings_->show_aggregated_damage !=
587       aggregator_->HasFrameAnnotator()) {
588     if (debug_settings_->show_aggregated_damage) {
589       aggregator_->SetFrameAnnotator(std::make_unique<DamageFrameAnnotator>());
590     } else {
591       aggregator_->DestroyFrameAnnotator();
592     }
593   }
594   gpu::ScopedAllowScheduleGpuTask allow_schedule_gpu_task;
595 
596   if (!current_surface_id_.is_valid()) {
597     TRACE_EVENT_INSTANT0("viz", "No root surface.", TRACE_EVENT_SCOPE_THREAD);
598     return false;
599   }
600 
601   if (!output_surface_) {
602     TRACE_EVENT_INSTANT0("viz", "No output surface", TRACE_EVENT_SCOPE_THREAD);
603     return false;
604   }
605 
606   gfx::OverlayTransform current_display_transform = gfx::OVERLAY_TRANSFORM_NONE;
607   Surface* surface = surface_manager_->GetSurfaceForId(current_surface_id_);
608   if (surface->HasActiveFrame()) {
609     current_display_transform =
610         surface->GetActiveFrame().metadata.display_transform_hint;
611     if (current_display_transform != output_surface_->GetDisplayTransform()) {
612       output_surface_->SetDisplayTransformHint(current_display_transform);
613 
614       // Gets the transform from |output_surface_| back so that if it ignores
615       // the hint, the rest of the code ignores the hint too.
616       current_display_transform = output_surface_->GetDisplayTransform();
617     }
618   }
619 
620   // During aggregation, SurfaceAggregator marks all resources used for a draw
621   // in the resource provider.  This has the side effect of deleting unused
622   // resources and their textures, generating sync tokens, and returning the
623   // resources to the client.  This involves GL work which is issued before
624   // drawing commands, and gets prioritized by GPU scheduler because sync token
625   // dependencies aren't issued until the draw.
626   //
627   // Batch and defer returning resources in resource provider.  This defers the
628   // GL commands for deleting resources to after the draw, and prevents context
629   // switching because the scheduler knows sync token dependencies at that time.
630   DisplayResourceProvider::ScopedBatchReturnResources returner(
631       resource_provider_.get(), /*allow_access_to_gpu_thread=*/true);
632 
633   base::ElapsedTimer aggregate_timer;
634   aggregate_timer.Begin();
635   AggregatedFrame frame;
636   {
637     FrameRateDecider::ScopedAggregate scoped_aggregate(
638         frame_rate_decider_.get());
639     gfx::Rect target_damage_bounding_rect;
640     if (output_surface_->capabilities().supports_target_damage)
641       target_damage_bounding_rect = renderer_->GetTargetDamageBoundingRect();
642 
643     // Ensure that the surfaces that were damaged by any delegated ink trail are
644     // aggregated again so that the trail exists for a single frame.
645     target_damage_bounding_rect.Union(
646         renderer_->GetDelegatedInkTrailDamageRect());
647 
648     frame = aggregator_->Aggregate(
649         current_surface_id_, expected_display_time, current_display_transform,
650         target_damage_bounding_rect, ++swapped_trace_id_);
651   }
652 
653   // Records whether the aggregated frame contains video or not.
654   // TODO(vikassoni) : Extend this capability to record whether a video frame is
655   // inline or fullscreen.
656   UMA_HISTOGRAM_ENUMERATION("Compositing.SurfaceAggregator.FrameContainsVideo",
657                             frame.may_contain_video
658                                 ? TypeOfVideoInFrame::kVideo
659                                 : TypeOfVideoInFrame::kNoVideo);
660 
661   if (frame.delegated_ink_metadata) {
662     TRACE_EVENT_INSTANT1(
663         "viz", "Delegated Ink Metadata was aggregated for DrawAndSwap.",
664         TRACE_EVENT_SCOPE_THREAD, "ink metadata",
665         frame.delegated_ink_metadata->ToString());
666     renderer_->SetDelegatedInkMetadata(std::move(frame.delegated_ink_metadata));
667   }
668 
669   UMA_HISTOGRAM_ENUMERATION("Compositing.ColorGamut",
670                             frame.content_color_usage);
671 
672 #if defined(OS_ANDROID)
673   bool wide_color_enabled =
674       display_color_spaces_.GetOutputColorSpace(
675           frame.content_color_usage, true) != gfx::ColorSpace::CreateSRGB();
676   if (wide_color_enabled != last_wide_color_enabled_) {
677     client_->SetWideColorEnabled(wide_color_enabled);
678     last_wide_color_enabled_ = wide_color_enabled;
679   }
680 #endif
681 
682   UMA_HISTOGRAM_COUNTS_1M("Compositing.SurfaceAggregator.AggregateUs",
683                           aggregate_timer.Elapsed().InMicroseconds());
684 
685   if (frame.render_pass_list.empty()) {
686     TRACE_EVENT_INSTANT0("viz", "Empty aggregated frame.",
687                          TRACE_EVENT_SCOPE_THREAD);
688     return false;
689   }
690 
691   TRACE_EVENT_ASYNC_BEGIN0("viz,benchmark", "Graphics.Pipeline.DrawAndSwap",
692                            swapped_trace_id_);
693 
694   // Run callbacks early to allow pipelining and collect presented callbacks.
695   damage_tracker_->RunDrawCallbacks();
696 
697   if (output_surface_->capabilities().skips_draw) {
698     TRACE_EVENT_INSTANT0("viz", "Skip draw", TRACE_EVENT_SCOPE_THREAD);
699     // Aggregation needs to happen before generating hit test for the unified
700     // desktop display. After this point skip drawing anything for real.
701     client_->DisplayWillDrawAndSwap(false, &frame.render_pass_list);
702     return true;
703   }
704 
705   frame.latency_info.insert(frame.latency_info.end(),
706                             stored_latency_info_.begin(),
707                             stored_latency_info_.end());
708   stored_latency_info_.clear();
709   bool have_copy_requests = frame.has_copy_requests;
710 
711   gfx::Size surface_size;
712   bool have_damage = false;
713   auto& last_render_pass = *frame.render_pass_list.back();
714 
715   // The CompositorFrame provided by the SurfaceAggregator includes the display
716   // transform while |current_surface_size_| is the pre-transform size received
717   // from the client.
718   const gfx::Transform display_transform = gfx::OverlayTransformToTransform(
719       current_display_transform, gfx::SizeF(current_surface_size_));
720   const gfx::Size current_surface_size =
721       cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
722           display_transform, gfx::Rect(current_surface_size_))
723           .size();
724   if (settings_.auto_resize_output_surface &&
725       last_render_pass.output_rect.size() != current_surface_size &&
726       last_render_pass.damage_rect == last_render_pass.output_rect &&
727       !current_surface_size.IsEmpty()) {
728     // Resize the output rect to the current surface size so that we won't
729     // skip the draw and so that the GL swap won't stretch the output.
730     last_render_pass.output_rect.set_size(current_surface_size);
731     last_render_pass.damage_rect = last_render_pass.output_rect;
732     frame.surface_damage_rect_list_.push_back(last_render_pass.damage_rect);
733   }
734   surface_size = last_render_pass.output_rect.size();
735   have_damage = !last_render_pass.damage_rect.size().IsEmpty();
736 
737   bool size_matches = surface_size == current_surface_size;
738   if (!size_matches)
739     TRACE_EVENT_INSTANT0("viz", "Size mismatch.", TRACE_EVENT_SCOPE_THREAD);
740 
741   bool should_draw = have_copy_requests || (have_damage && size_matches);
742   client_->DisplayWillDrawAndSwap(should_draw, &frame.render_pass_list);
743 
744   base::Optional<base::ElapsedTimer> draw_timer;
745   if (should_draw) {
746     TRACE_EVENT_ASYNC_STEP_INTO0("viz,benchmark",
747                                  "Graphics.Pipeline.DrawAndSwap",
748                                  swapped_trace_id_, "Draw");
749     base::ElapsedTimer draw_occlusion_timer;
750     RemoveOverdrawQuads(&frame);
751     UMA_HISTOGRAM_COUNTS_1000(
752         "Compositing.Display.Draw.Occlusion.Calculation.Time",
753         draw_occlusion_timer.Elapsed().InMicroseconds());
754 
755     // TODO(vmpstr): This used to set to
756     // frame.metadata.is_resourceless_software_draw_with_scroll_or_animation
757     // from CompositedFrame. However, after changing this to AggregatedFrame, it
758     // seems that the value is never changed from the default false (i.e.
759     // SurfaceAggregator has no reference to
760     // is_resourceless_software_draw_with_scroll_or_animation). The TODO here is
761     // to clean up the code below or to figure out if this value is important.
762     bool disable_image_filtering = false;
763     if (software_renderer_) {
764       software_renderer_->SetDisablePictureQuadImageFiltering(
765           disable_image_filtering);
766     } else {
767       // This should only be set for software draws in synchronous compositor.
768       DCHECK(!disable_image_filtering);
769     }
770 
771     draw_timer.emplace();
772     renderer_->DecideRenderPassAllocationsForFrame(frame.render_pass_list);
773     renderer_->DrawFrame(&frame.render_pass_list, device_scale_factor_,
774                          current_surface_size, display_color_spaces_,
775                          &frame.surface_damage_rect_list_);
776     switch (output_surface_->type()) {
777       case OutputSurface::Type::kSoftware:
778         UMA_HISTOGRAM_COUNTS_1M(
779             "Compositing.DirectRenderer.Software.DrawFrameUs",
780             draw_timer->Elapsed().InMicroseconds());
781         break;
782       case OutputSurface::Type::kOpenGL:
783         UMA_HISTOGRAM_COUNTS_1M("Compositing.DirectRenderer.GL.DrawFrameUs",
784                                 draw_timer->Elapsed().InMicroseconds());
785         break;
786       case OutputSurface::Type::kVulkan:
787         UMA_HISTOGRAM_COUNTS_1M("Compositing.DirectRenderer.VK.DrawFrameUs",
788                                 draw_timer->Elapsed().InMicroseconds());
789         break;
790     }
791   } else {
792     TRACE_EVENT_INSTANT0("viz", "Draw skipped.", TRACE_EVENT_SCOPE_THREAD);
793   }
794 
795   bool should_swap = !disable_swap_until_resize_ && should_draw && size_matches;
796   if (should_swap) {
797     PresentationGroupTiming presentation_group_timing;
798     presentation_group_timing.OnDraw(draw_timer->Begin());
799 
800     for (const auto& id_entry : aggregator_->previous_contained_surfaces()) {
801       Surface* surface = surface_manager_->GetSurfaceForId(id_entry.first);
802       if (surface) {
803         std::unique_ptr<Surface::PresentationHelper> helper =
804             surface->TakePresentationHelperForPresentNotification();
805         if (helper) {
806           presentation_group_timing.AddPresentationHelper(std::move(helper));
807         }
808       }
809     }
810     pending_presentation_group_timings_.emplace_back(
811         std::move(presentation_group_timing));
812 
813     TRACE_EVENT_ASYNC_STEP_INTO0("viz,benchmark",
814                                  "Graphics.Pipeline.DrawAndSwap",
815                                  swapped_trace_id_, "WaitForSwap");
816     swapped_since_resize_ = true;
817 
818     ui::LatencyInfo::TraceIntermediateFlowEvents(
819         frame.latency_info,
820         perfetto::protos::pbzero::ChromeLatencyInfo::STEP_DRAW_AND_SWAP);
821 
822     cc::benchmark_instrumentation::IssueDisplayRenderingStatsEvent();
823     DirectRenderer::SwapFrameData swap_frame_data;
824     swap_frame_data.latency_info = std::move(frame.latency_info);
825     if (frame.top_controls_visible_height.has_value()) {
826       swap_frame_data.top_controls_visible_height_changed =
827           last_top_controls_visible_height_ !=
828           *frame.top_controls_visible_height;
829       last_top_controls_visible_height_ = *frame.top_controls_visible_height;
830     }
831 
832     // We must notify scheduler and increase |pending_swaps_| before calling
833     // SwapBuffers() as it can call DidReceiveSwapBuffersAck synchronously.
834     if (scheduler_)
835       scheduler_->DidSwapBuffers();
836     pending_swaps_++;
837 
838     renderer_->SwapBuffers(std::move(swap_frame_data));
839   } else {
840     TRACE_EVENT_INSTANT0("viz", "Swap skipped.", TRACE_EVENT_SCOPE_THREAD);
841 
842     if (have_damage && !size_matches)
843       aggregator_->SetFullDamageForSurface(current_surface_id_);
844 
845     if (have_damage) {
846       // Do not store more than the allowed size.
847       if (ui::LatencyInfo::Verify(frame.latency_info, "Display::DrawAndSwap")) {
848         stored_latency_info_.swap(frame.latency_info);
849       }
850     } else {
851       // There was no damage. Terminate the latency info objects.
852       while (!frame.latency_info.empty()) {
853         auto& latency = frame.latency_info.back();
854         latency.Terminate();
855         frame.latency_info.pop_back();
856       }
857     }
858 
859     renderer_->SwapBuffersSkipped();
860 
861     TRACE_EVENT_ASYNC_END1("viz,benchmark", "Graphics.Pipeline.DrawAndSwap",
862                            swapped_trace_id_, "status", "canceled");
863     --swapped_trace_id_;
864     if (scheduler_) {
865       scheduler_->DidSwapBuffers();
866       scheduler_->DidReceiveSwapBuffersAck();
867     }
868   }
869 
870   client_->DisplayDidDrawAndSwap();
871 
872   // Garbage collection can lead to sync IPCs to the GPU service to verify sync
873   // tokens. We defer garbage collection until the end of DrawAndSwap to avoid
874   // stalling the critical path for compositing.
875   surface_manager_->GarbageCollectSurfaces();
876 
877   return true;
878 }
879 
// Handles the acknowledgement for a SwapBuffers issued in DrawAndSwap():
// emits trace steps, decrements the outstanding-swap count, notifies the
// scheduler/overlay processor/renderer, and records |timings| on the oldest
// PresentationGroupTiming that has not yet received a swap ack.
void Display::DidReceiveSwapBuffersAck(const gfx::SwapTimings& timings) {
  // Adding to |pending_presentation_group_timings_| must
  // have been done in DrawAndSwap(), and should not be popped until
  // DidReceiveSwapBuffersAck.
  DCHECK(!pending_presentation_group_timings_.empty());

  // Advance the async trace to the "Swap" and "WaitForPresentation" steps
  // using the timestamps reported by the output surface.
  ++last_swap_ack_trace_id_;
  TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(
      "viz,benchmark", "Graphics.Pipeline.DrawAndSwap", last_swap_ack_trace_id_,
      "Swap", timings.swap_start);
  TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(
      "viz,benchmark", "Graphics.Pipeline.DrawAndSwap", last_swap_ack_trace_id_,
      "WaitForPresentation", timings.swap_end);

  // One fewer swap is outstanding; DrawAndSwap() incremented this before
  // calling SwapBuffers().
  DCHECK_GT(pending_swaps_, 0);
  pending_swaps_--;
  if (scheduler_) {
    scheduler_->DidReceiveSwapBuffersAck();
  }

  // Fire the "all swaps drained" callback, if one was registered.
  if (no_pending_swaps_callback_ && pending_swaps_ == 0)
    std::move(no_pending_swaps_callback_).Run();

  if (overlay_processor_)
    overlay_processor_->OverlayPresentationComplete();
  if (renderer_)
    renderer_->SwapBuffersComplete();

  // It's possible to receive multiple calls to DidReceiveSwapBuffersAck()
  // before DidReceivePresentationFeedback(). Ensure that we're not setting
  // |swap_timings_| for the same PresentationGroupTiming multiple times.
  base::TimeTicks draw_start_timestamp;
  for (auto& group_timing : pending_presentation_group_timings_) {
    if (!group_timing.HasSwapped()) {
      group_timing.OnSwap(timings);
      draw_start_timestamp = group_timing.draw_start_timestamp();
      break;
    }
  }

  // We should have at least one group that hasn't received a SwapBuffersAck
  DCHECK(!draw_start_timestamp.is_null());

  // Check that the swap timings correspond with the timestamp from when
  // the swap was triggered. Note that not all output surfaces provide timing
  // information, hence the check for a valid swap_start.
  if (!timings.swap_start.is_null()) {
    DCHECK_LE(draw_start_timestamp, timings.swap_start);
    base::TimeDelta delta = timings.swap_start - draw_start_timestamp;
    UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
        "Compositing.Display.DrawToSwapUs", delta, kDrawToSwapMin,
        kDrawToSwapMax, kDrawToSwapUsBuckets);
  }

  // Viz-scheduled-draw -> GPU-started-draw latency, when the output surface
  // reports those timestamps.
  if (!timings.viz_scheduled_draw.is_null()) {
    DCHECK(!timings.gpu_started_draw.is_null());
    DCHECK_LE(timings.viz_scheduled_draw, timings.gpu_started_draw);
    base::TimeDelta delta =
        timings.gpu_started_draw - timings.viz_scheduled_draw;
    UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
        "Compositing.Display.VizScheduledDrawToGpuStartedDrawUs", delta,
        kDrawToSwapMin, kDrawToSwapMax, kDrawToSwapUsBuckets);
  }
}
944 
DidReceiveTextureInUseResponses(const gpu::TextureInUseResponses & responses)945 void Display::DidReceiveTextureInUseResponses(
946     const gpu::TextureInUseResponses& responses) {
947   if (renderer_)
948     renderer_->DidReceiveTextureInUseResponses(responses);
949 }
950 
DidReceiveCALayerParams(const gfx::CALayerParams & ca_layer_params)951 void Display::DidReceiveCALayerParams(
952     const gfx::CALayerParams& ca_layer_params) {
953   if (client_)
954     client_->DisplayDidReceiveCALayerParams(ca_layer_params);
955 }
956 
DidSwapWithSize(const gfx::Size & pixel_size)957 void Display::DidSwapWithSize(const gfx::Size& pixel_size) {
958   if (client_)
959     client_->DisplayDidCompleteSwapWithSize(pixel_size);
960 }
961 
// Handles presentation feedback for the oldest pending presentation group:
// ends the DrawAndSwap async trace, sanitizes the feedback timestamps against
// the group's draw-start time, and pops the group after notifying it.
void Display::DidReceivePresentationFeedback(
    const gfx::PresentationFeedback& feedback) {
  // Feedback with no matching DrawAndSwap() entry is unexpected; log and bail.
  if (pending_presentation_group_timings_.empty()) {
    DLOG(ERROR) << "Received unexpected PresentationFeedback";
    return;
  }
  ++last_presented_trace_id_;
  TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(
      "viz,benchmark", "Graphics.Pipeline.DrawAndSwap",
      last_presented_trace_id_, feedback.timestamp);
  // Groups are presented in FIFO order; this feedback belongs to the front.
  auto& presentation_group_timing = pending_presentation_group_timings_.front();
  auto copy_feedback = SanitizePresentationFeedback(
      feedback, presentation_group_timing.draw_start_timestamp());
  TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(
      "benchmark,viz", "Display::FrameDisplayed", TRACE_EVENT_SCOPE_THREAD,
      copy_feedback.timestamp);

  // When composite-time tracing is on, hand the ready timestamp (if the
  // platform provided one) to the renderer for its traces.
  if (renderer_->CompositeTimeTracingEnabled()) {
    if (copy_feedback.ready_timestamp.is_null()) {
      LOG(WARNING) << "Ready Timestamp unavailable";
    } else {
      renderer_->AddCompositeTimeTraces(copy_feedback.ready_timestamp);
    }
  }

  presentation_group_timing.OnPresent(copy_feedback);
  pending_presentation_group_timings_.pop_front();
}
990 
DidReceiveReleasedOverlays(const std::vector<gpu::Mailbox> & released_overlays)991 void Display::DidReceiveReleasedOverlays(
992     const std::vector<gpu::Mailbox>& released_overlays) {
993   if (renderer_)
994     renderer_->DidReceiveReleasedOverlays(released_overlays);
995 }
996 
// Requests a redraw. Note that |damage_rect| is currently ignored: the whole
// root surface is marked as fully damaged regardless of the requested rect.
void Display::SetNeedsRedrawRect(const gfx::Rect& damage_rect) {
  aggregator_->SetFullDamageForSurface(current_surface_id_);
  damage_tracker_->SetRootSurfaceDamaged();
}
1001 
DidFinishFrame(const BeginFrameAck & ack)1002 void Display::DidFinishFrame(const BeginFrameAck& ack) {
1003   for (auto& observer : observers_)
1004     observer.OnDisplayDidFinishFrame(ack);
1005 
1006   // Prevent de-jelly skew or a delegated ink trail from staying on the screen
1007   // for more than one frame by forcing a new frame to be produced.
1008   if (aggregator_->last_frame_had_jelly() ||
1009       !renderer_->GetDelegatedInkTrailDamageRect().IsEmpty()) {
1010     scheduler_->SetNeedsOneBeginFrame(true);
1011   }
1012 }
1013 
// Returns the SurfaceId this Display is currently set to show.
const SurfaceId& Display::CurrentSurfaceId() {
  return current_surface_id_;
}
1017 
GetSurfaceAtAggregation(const FrameSinkId & frame_sink_id) const1018 LocalSurfaceId Display::GetSurfaceAtAggregation(
1019     const FrameSinkId& frame_sink_id) const {
1020   if (!aggregator_)
1021     return LocalSurfaceId();
1022   auto it = aggregator_->previous_contained_frame_sinks().find(frame_sink_id);
1023   if (it == aggregator_->previous_contained_frame_sinks().end())
1024     return LocalSurfaceId();
1025   return it->second;
1026 }
1027 
SoftwareDeviceUpdatedCALayerParams(const gfx::CALayerParams & ca_layer_params)1028 void Display::SoftwareDeviceUpdatedCALayerParams(
1029     const gfx::CALayerParams& ca_layer_params) {
1030   if (client_)
1031     client_->DisplayDidReceiveCALayerParams(ca_layer_params);
1032 }
1033 
ForceImmediateDrawAndSwapIfPossible()1034 void Display::ForceImmediateDrawAndSwapIfPossible() {
1035   if (scheduler_)
1036     scheduler_->ForceImmediateSwapIfPossible();
1037 }
1038 
SetNeedsOneBeginFrame()1039 void Display::SetNeedsOneBeginFrame() {
1040   if (scheduler_)
1041     scheduler_->SetNeedsOneBeginFrame(false);
1042 }
1043 
// Draw-occlusion pass. For each render pass, accumulates an occlusion region
// in target space from SharedQuadStates that are fully opaque (opacity 1,
// opaque contents, kSrc/kSrcOver blending, 2d-axis-aligned transform), then
// shrinks, splits, or empties subsequently visited quads that fall inside
// that region, reducing overdraw before rendering. Rects covered by render
// passes with pixel-moving backdrop filters are tracked separately and
// excluded from quad splitting.
void Display::RemoveOverdrawQuads(AggregatedFrame* frame) {
  if (frame->render_pass_list.empty())
    return;

  // Precompute, in root target space, the output rect of every render pass
  // that has a pixel-moving backdrop filter.
  base::flat_map<AggregatedRenderPassId, gfx::Rect> backdrop_filter_rects;
  for (const auto& pass : frame->render_pass_list) {
    if (!pass->backdrop_filters.IsEmpty() &&
        pass->backdrop_filters.HasFilterThatMovesPixels()) {
      backdrop_filter_rects[pass->id] = cc::MathUtil::MapEnclosingClippedRect(
          pass->transform_to_root_target, pass->output_rect);
    }
  }

  for (const auto& pass : frame->render_pass_list) {
    const SharedQuadState* last_sqs = nullptr;
    // Opaque region accumulated so far, in the pass's target space.
    cc::Region occlusion_in_target_space;
    // Union of backdrop-filter rects seen so far, in target space.
    cc::Region backdrop_filters_in_target_space;
    bool current_sqs_intersects_occlusion = false;

    // TODO(yiyix): Add filter effects to draw occlusion calculation
    if (!pass->filters.IsEmpty() || !pass->backdrop_filters.IsEmpty())
      continue;

    // When there is only one quad in the render pass, occlusion is not
    // possible.
    if (pass->quad_list.size() == 1)
      continue;

    auto quad_list_end = pass->quad_list.end();
    cc::Region occlusion_in_quad_content_space;
    gfx::Rect render_pass_quads_in_content_space;
    for (auto quad = pass->quad_list.begin(); quad != quad_list_end;) {
      // Sanity check: we should not have a Compositor
      // CompositorRenderPassDrawQuad here.
      DCHECK_NE(quad->material, DrawQuad::Material::kCompositorRenderPass);
      // Skip quad if it is a AggregatedRenderPassDrawQuad because it is a
      // special type of DrawQuad where the visible_rect of shared quad state is
      // not entirely covered by draw quads in it.
      if (quad->material == DrawQuad::Material::kAggregatedRenderPass) {
        // A RenderPass with backdrop filters may apply to a quad underlying
        // RenderPassQuad. These regions should be tracked so that correctly
        // handle splitting and occlusion of the underlying quad.
        auto* rpdq = AggregatedRenderPassDrawQuad::MaterialCast(*quad);
        auto it = backdrop_filter_rects.find(rpdq->render_pass_id);
        if (it != backdrop_filter_rects.end()) {
          backdrop_filters_in_target_space.Union(it->second);
        }
        ++quad;
        continue;
      }
      // Also skip quad if the DrawQuad is inside a 3d object.
      if (quad->shared_quad_state->sorting_context_id != 0) {
        ++quad;
        continue;
      }

      if (!last_sqs)
        last_sqs = quad->shared_quad_state;

      gfx::Transform transform =
          quad->shared_quad_state->quad_to_target_transform;

      // A new SharedQuadState was reached: fold the previous sqs's opaque
      // footprint into the occlusion region, then recompute the occlusion in
      // the new sqs's content space.
      // TODO(yiyix): Find a rect interior to each transformed quad.
      if (last_sqs != quad->shared_quad_state) {
        if (last_sqs->opacity == 1 && last_sqs->are_contents_opaque &&
            (last_sqs->blend_mode == SkBlendMode::kSrcOver ||
             last_sqs->blend_mode == SkBlendMode::kSrc) &&
            last_sqs->quad_to_target_transform.Preserves2dAxisAlignment()) {
          gfx::Rect sqs_rect_in_target =
              cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
                  last_sqs->quad_to_target_transform,
                  last_sqs->visible_quad_layer_rect);

          // If a rounded corner is being applied then the visible rect for the
          // sqs is actually even smaller. Reduce the rect size to get a
          // rounded corner adjusted occluding region.
          if (last_sqs->mask_filter_info.HasRoundedCorners()) {
            sqs_rect_in_target.Intersect(
                gfx::ToEnclosedRect(GetOccludingRectForRRectF(
                    last_sqs->mask_filter_info.rounded_corner_bounds())));
          }

          if (last_sqs->is_clipped)
            sqs_rect_in_target.Intersect(last_sqs->clip_rect);

          // If region complexity is above our threshold, remove the smallest
          // rects from occlusion region.
          occlusion_in_target_space.Union(sqs_rect_in_target);
          while (occlusion_in_target_space.GetRegionComplexity() >
                 settings_.kMaximumOccluderComplexity) {
            gfx::Rect smallest_rect = *occlusion_in_target_space.begin();
            for (const auto& occluding_rect : occlusion_in_target_space) {
              if (occluding_rect.size().GetCheckedArea().ValueOrDefault(
                      INT_MAX) <
                  smallest_rect.size().GetCheckedArea().ValueOrDefault(
                      INT_MAX)) {
                smallest_rect = occluding_rect;
              }
            }
            occlusion_in_target_space.Subtract(smallest_rect);
          }
        }
        // If the visible_rect of the current shared quad state does not
        // intersect with the occlusion rect, we can skip draw occlusion checks
        // for quads in the current SharedQuadState.
        last_sqs = quad->shared_quad_state;
        occlusion_in_quad_content_space.Clear();
        render_pass_quads_in_content_space = gfx::Rect();
        const auto current_sqs_in_target_space =
            cc::MathUtil::MapEnclosingClippedRect(
                transform, last_sqs->visible_quad_layer_rect);
        current_sqs_intersects_occlusion =
            occlusion_in_target_space.Intersects(current_sqs_in_target_space);

        // Compute the occlusion region in the quad content space for scale and
        // translation transforms. Note that 0 scale transform will fail the
        // positive scale check.
        if (current_sqs_intersects_occlusion &&
            transform.IsPositiveScaleOrTranslation()) {
          gfx::Transform reverse_transform;
          bool is_invertible = transform.GetInverse(&reverse_transform);
          // Scale transform can be inverted by multiplying 1/scale (given
          // scale > 0) and translation transform can be inverted by applying
          // the reversed directional translation. Therefore, |transform| is
          // always invertible.
          DCHECK(is_invertible);
          DCHECK_LE(occlusion_in_target_space.GetRegionComplexity(),
                    settings_.kMaximumOccluderComplexity);

          // Since transform can only be a scale or a translation matrix, it is
          // safe to use function MapEnclosedRectWith2dAxisAlignedTransform to
          // define occluded region in the quad content space with inverted
          // transform.
          for (const gfx::Rect& rect_in_target_space :
               occlusion_in_target_space) {
            if (current_sqs_in_target_space.Intersects(rect_in_target_space)) {
              auto rect_in_content =
                  cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
                      reverse_transform, rect_in_target_space);
              occlusion_in_quad_content_space.Union(
                  SafeConvertRectForRegion(rect_in_content));
            }
          }

          // A render pass quad may apply some filter or transform to an
          // underlying quad. Do not split quads when they intersect with a
          // render pass quad.
          if (current_sqs_in_target_space.Intersects(
                  backdrop_filters_in_target_space.bounds())) {
            for (const auto& rect_in_target_space :
                 backdrop_filters_in_target_space) {
              auto rect_in_content =
                  cc::MathUtil::MapEnclosedRectWith2dAxisAlignedTransform(
                      reverse_transform, rect_in_target_space);
              render_pass_quads_in_content_space.Union(rect_in_content);
            }
          }
        }
      }

      // Nothing in this SharedQuadState overlaps the occlusion region, so
      // leave the quad untouched.
      if (!current_sqs_intersects_occlusion) {
        ++quad;
        continue;
      }

      if (occlusion_in_quad_content_space.Contains(quad->visible_rect)) {
        // Case 1: for simple transforms (scale or translation), define the
        // occlusion region in the quad content space. If |quad| is not
        // shown on the screen, then set its rect and visible_rect to be empty.
        quad->visible_rect.set_size(gfx::Size());
      } else if (occlusion_in_quad_content_space.Intersects(
                     quad->visible_rect)) {
        // Case 2: for simple transforms, if the quad is partially shown on
        // screen and the region formed by (occlusion region - visible_rect) is
        // a rect, then update visible_rect to the resulting rect.
        cc::Region visible_region = quad->visible_rect;
        visible_region.Subtract(occlusion_in_quad_content_space);
        quad->visible_rect = visible_region.bounds();

        // Split quad into multiple draw quads when area can be reduce by
        // more than X fragments.
        const bool should_split_quads =
            !overlay_processor_->DisableSplittingQuads() &&
            !visible_region.Intersects(render_pass_quads_in_content_space) &&
            ReduceComplexity(visible_region, settings_.quad_split_limit,
                             &cached_visible_region_) &&
            CanSplitQuad(quad->material, cached_visible_region_,
                         visible_region.bounds().size(),
                         settings_.minimum_fragments_reduced,
                         device_scale_factor_);
        if (should_split_quads) {
          // Insert copies of the quad before it, one per visible sub-rect,
          // then continue iteration from the first inserted copy.
          auto new_quad = pass->quad_list.InsertCopyBeforeDrawQuad(
              quad, cached_visible_region_.size() - 1);
          for (const auto& visible_rect : cached_visible_region_) {
            new_quad->visible_rect = visible_rect;
            ++new_quad;
          }
          quad = new_quad;
          continue;
        }
      } else if (occlusion_in_quad_content_space.IsEmpty() &&
                 occlusion_in_target_space.Contains(
                     cc::MathUtil::MapEnclosingClippedRect(
                         transform, quad->visible_rect))) {
        // Case 3: for non simple transforms, define the occlusion region in
        // target space. If |quad| is not shown on the screen, then set its
        // rect and visible_rect to be empty.
        quad->visible_rect.set_size(gfx::Size());
      }
      ++quad;
    }
  }
}
1257 
// Applies a preferred frame interval. When the frame-rate decider reports
// platform support for setting a frame rate, the interval is converted to a
// rate (zero interval maps to rate 0) and handed to the output surface; on
// Android this is where handling ends. Otherwise (or on non-Android), the
// interval is also forwarded to the client.
void Display::SetPreferredFrameInterval(base::TimeDelta interval) {
  if (frame_rate_decider_->supports_set_frame_rate()) {
    float interval_s = interval.InSecondsF();
    // Avoid dividing by zero: a zero interval becomes frame rate 0.
    float frame_rate = interval_s == 0 ? 0 : (1 / interval_s);
    output_surface_->SetFrameRate(frame_rate);
#if defined(OS_ANDROID)
    // On Android we want to return early because the |client_| callback hits
    // a platform API in the browser process.
    return;
#endif  // OS_ANDROID
  }

  client_->SetPreferredFrameInterval(interval);
}
1272 
// Delegates to the client for the preferred frame interval of |id|; |type|
// is passed through to the client unchanged.
base::TimeDelta Display::GetPreferredFrameIntervalForFrameSinkId(
    const FrameSinkId& id,
    mojom::CompositorFrameSinkType* type) {
  return client_->GetPreferredFrameIntervalForFrameSinkId(id, type);
}
1278 
// Hands the set of supported frame intervals to the frame-rate decider.
void Display::SetSupportedFrameIntervals(
    std::vector<base::TimeDelta> intervals) {
  frame_rate_decider_->SetSupportedFrameIntervals(std::move(intervals));
}
1283 
// Forwards to the output surface's GetCacheBackBufferCb().
base::ScopedClosureRunner Display::GetCacheBackBufferCb() {
  return output_surface_->GetCacheBackBufferCb();
}
1287 
// Tells the resource provider to disallow access to the GPU thread.
// Requires |resource_provider_| to have been created.
void Display::DisableGPUAccessByDefault() {
  DCHECK(resource_provider_);
  resource_provider_->SetAllowAccessToGPUThread(false);
}
1292 
// Returns the renderer's delegated-ink point renderer. Note that |renderer_|
// is dereferenced without a null check here, so this must only be called
// once the renderer exists.
DelegatedInkPointRendererBase* Display::GetDelegatedInkPointRenderer() {
  return renderer_->GetDelegatedInkPointRenderer();
}
1296 
1297 }  // namespace viz
1298