/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! # Prepare pass
//!
//! The prepare pass walks the visible primitive clusters produced by the
//! visibility pass and readies each primitive for rendering: it refines
//! coarse visibility against the current dirty region, requests GPU cache
//! data and cached render tasks (clip masks, border segments, line
//! decorations, gradient tiles), and accumulates per-cluster opaque rects
//! into the owning surface.

use std::cmp;
use api::{PremultipliedColorF, PropertyBinding};
use api::{BoxShadowClipMode, BorderStyle, ClipMode};
use api::units::*;
use euclid::Scale;
use smallvec::SmallVec;
use crate::image_tiling::{self, Repetition};
use crate::border::{get_max_scale_for_border, build_border_instances};
use crate::clip::{ClipStore};
use crate::spatial_tree::{SpatialNodeIndex, SpatialTree};
use crate::clip::{ClipDataStore, ClipNodeFlags, ClipChainInstance, ClipItemKind};
use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureContext, PictureState};
use crate::gpu_cache::{GpuCacheHandle, GpuDataRequest};
use crate::gpu_types::{BrushFlags};
use crate::internal_types::{FastHashMap, PlaneSplitAnchor};
use crate::picture::{PicturePrimitive, SliceId, TileCacheLogger, ClusterFlags, SurfaceRenderTasks};
use crate::picture::{PrimitiveList, PrimitiveCluster, SurfaceIndex, TileCacheInstance, SubpixelMode};
use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION;
use crate::prim_store::*;
use crate::render_backend::DataStores;
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::RenderTaskCacheKeyKind;
use crate::render_task_cache::{RenderTaskCacheKey, to_cache_size, RenderTaskParent};
use crate::render_task::{RenderTaskKind, RenderTask};
use crate::segment::SegmentBuilder;
use crate::space::SpaceMapper;
use crate::util::{clamp_to_scale_factor, pack_as_float, raster_rect_to_device_pixels};
use crate::visibility::{compute_conservative_visible_rect, PrimitiveVisibility, VisibilityState};

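/// Maximum device-pixel dimension for a clip mask render task. Larger masks
/// are rendered at a reduced device pixel scale and stretched when sampled
/// (see the `adjust_mask_scale_for_max_size` call in `update_clip_task`).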
const MAX_MASK_SIZE: f32 = 4096.0;

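/// Minimum local-space area a brush must cover before splitting it into
/// segments is considered worthwhile (assumed from how the segment-building
/// code later in this pass uses this threshold; not documented here).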
const MIN_BRUSH_SPLIT_AREA: f32 = 128.0 * 128.0;

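/// Prepare all visible primitives in a primitive list for rendering. For each
/// visible cluster this refines coarse visibility against the current dirty
/// region, prepares each primitive instance, and folds the cluster's opaque
/// rect into the target surface's opaque rect.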
pub fn prepare_primitives(
    store: &mut PrimitiveStore,
    prim_list: &mut PrimitiveList,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
    tile_cache_log: &mut TileCacheLogger,
    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
) {
    profile_scope!("prepare_primitives");
    for (cluster_index, cluster) in prim_list.clusters.iter_mut().enumerate() {
        if !cluster.flags.contains(ClusterFlags::IS_VISIBLE) {
            continue;
        }
        profile_scope!("cluster");
        pic_state.map_local_to_pic.set_target_spatial_node(
            cluster.spatial_node_index,
            frame_context.spatial_tree,
        );

        frame_state.surfaces[pic_context.surface_index.0].opaque_rect = PictureRect::zero();

        for (idx, prim_instance) in (&mut prim_list.prim_instances[cluster.prim_range()]).iter_mut().enumerate() {
            let prim_instance_index = cluster.prim_range.start + idx;

            // First check for coarse visibility (if this primitive was completely off-screen)
            match prim_instance.vis.state {
                VisibilityState::Unset => {
                    panic!("bug: invalid vis state");
                }
                VisibilityState::Culled => {
                    continue;
                }
                VisibilityState::Coarse { ref filter, vis_flags } => {
                    // The original coarse state was calculated during the initial visibility pass.
                    // However, it's possible that the dirty rect has got smaller, if tiles were not
                    // dirty. Intersecting with the dirty rect here eliminates preparing any primitives
                    // outside the dirty rect, and reduces the size of any off-screen surface allocations
                    // for clip masks / render tasks that we make.

                    // Clear the current visibility mask, and build a more detailed one based on the
                    // dirty rect regions below.
                    let dirty_region = frame_state.current_dirty_region();
                    let is_in_dirty_region = dirty_region.filters
                        .iter()
                        .any(|region_filter| region_filter.matches(filter));

                    if is_in_dirty_region {
                        prim_instance.vis.state = VisibilityState::Detailed {
                            filter: *filter,
                            vis_flags,
                        }
                    } else {
                        prim_instance.clear_visibility();
                        continue;
                    }
                }
                VisibilityState::Detailed { .. } => {
                    // Was already set to detailed (picture caching disabled or a root element)
                }
                VisibilityState::PassThrough => {}
            }

            let plane_split_anchor = PlaneSplitAnchor::new(cluster_index, prim_instance_index);

            if prepare_prim_for_render(
                store,
                prim_instance,
                cluster,
                pic_context,
                pic_state,
                frame_context,
                frame_state,
                plane_split_anchor,
                data_stores,
                scratch,
                tile_cache_log,
                tile_caches,
            ) {
                frame_state.num_visible_primitives += 1;
            } else {
                prim_instance.clear_visibility();
            }
        }

        if !cluster.opaque_rect.is_empty() {
            let surface = &mut frame_state.surfaces[pic_context.surface_index.0];

            if let Some(cluster_opaque_rect) = surface.map_local_to_surface.map_inner_bounds(&cluster.opaque_rect) {
                surface.opaque_rect = crate::util::conservative_union_rect(&surface.opaque_rect, &cluster_opaque_rect);
            }
        }
    }
}

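/// Prepare a single primitive instance for rendering. Pictures recursively
/// prepare their children first, since a child's rect can affect the size of
/// the surface the picture is composited onto. Returns false if the primitive
/// was culled (for example, if its clip task could not be created).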
fn prepare_prim_for_render(
    store: &mut PrimitiveStore,
    prim_instance: &mut PrimitiveInstance,
    cluster: &mut PrimitiveCluster,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    plane_split_anchor: PlaneSplitAnchor,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
    tile_cache_log: &mut TileCacheLogger,
    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
) -> bool {
    profile_scope!("prepare_prim_for_render");

    // If we have dependencies, we need to prepare them first, in order
    // to know the actual rect of this primitive.
    // For example, scrolling may affect the location of an item in
    // local space, which may force us to render this item on a larger
    // picture target, if being composited.
    if let PrimitiveInstanceKind::Picture { pic_index, .. } = prim_instance.kind {
        let pic = &mut store.pictures[pic_index.0];

        match pic.take_context(
            pic_index,
            pic_context.surface_spatial_node_index,
            pic_context.raster_spatial_node_index,
            pic_context.surface_index,
            pic_context.subpixel_mode,
            frame_state,
            frame_context,
            scratch,
            tile_cache_log,
            tile_caches,
        ) {
            Some((pic_context_for_children, mut pic_state_for_children, mut prim_list)) => {
                prepare_primitives(
                    store,
                    &mut prim_list,
                    &pic_context_for_children,
                    &mut pic_state_for_children,
                    frame_context,
                    frame_state,
                    data_stores,
                    scratch,
                    tile_cache_log,
                    tile_caches,
                );

                // Restore the dependencies (borrow check dance)
                store.pictures[pic_context_for_children.pic_index.0]
                    .restore_context(
                        prim_list,
                        pic_context_for_children,
                        pic_state_for_children,
                        frame_state,
                    );
            }
            None => {
                if prim_instance.is_chased() {
                    println!("\tculled for carrying an invisible composite filter");
                }

                return false;
            }
        }
    }

    let prim_rect = data_stores.get_local_prim_rect(
        prim_instance,
        store,
    );

    if !update_clip_task(
        prim_instance,
        &prim_rect.min,
        cluster.spatial_node_index,
        pic_context.raster_spatial_node_index,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        store,
        data_stores,
        scratch,
    ) {
        if prim_instance.is_chased() {
            println!("\tconsidered invisible");
        }
        return false;
    }

    if prim_instance.is_chased() {
        println!("\tconsidered visible and ready with local pos {:?}", prim_rect.min);
    }

    #[cfg(debug_assertions)]
    {
        prim_instance.prepared_frame_id = frame_state.rg_builder.frame_id();
    }

    prepare_interned_prim_for_render(
        store,
        prim_instance,
        cluster,
        plane_split_anchor,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        data_stores,
        scratch,
    );

    true
}

/// Prepare an interned primitive for rendering, by requesting
/// resources, render tasks etc. This is equivalent to the
/// prepare_prim_for_render_inner call for old style primitives.
fn prepare_interned_prim_for_render(
    store: &mut PrimitiveStore,
    prim_instance: &mut PrimitiveInstance,
    cluster: &mut PrimitiveCluster,
    plane_split_anchor: PlaneSplitAnchor,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
) {
    let prim_spatial_node_index = cluster.spatial_node_index;
    let is_chased = prim_instance.is_chased();
    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;
    let mut is_opaque = false;

    match &mut prim_instance.kind {
        PrimitiveInstanceKind::LineDecoration { data_handle, ref mut render_task, .. } => {
            profile_scope!("LineDecoration");
            let prim_data = &mut data_stores.line_decoration[*data_handle];
            let common_data = &mut prim_data.common;
            let line_dec_data = &mut prim_data.kind;

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            line_dec_data.update(common_data, frame_state);

            // Work out the device pixel size to be used to cache this line decoration.
            if is_chased {
                println!("\tline decoration key={:?}", line_dec_data.cache_key);
            }

            // If we have a cache key, it's a wavy / dashed / dotted line. Otherwise, it's
            // a simple solid line.
            if let Some(cache_key) = line_dec_data.cache_key.as_ref() {
                // TODO(gw): Do we ever need / want to support scales for text decorations
                //           based on the current transform?
                let scale_factor = Scale::new(1.0) * device_pixel_scale;
                let mut task_size = (LayoutSize::from_au(cache_key.size) * scale_factor).ceil().to_i32();
                if task_size.width > MAX_LINE_DECORATION_RESOLUTION as i32 ||
                   task_size.height > MAX_LINE_DECORATION_RESOLUTION as i32 {
                    let max_extent = cmp::max(task_size.width, task_size.height);
                    let task_scale_factor = Scale::new(MAX_LINE_DECORATION_RESOLUTION as f32 / max_extent as f32);
                    task_size = (LayoutSize::from_au(cache_key.size) * scale_factor * task_scale_factor)
                                    .ceil().to_i32();
                }
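                // For example (hypothetical numbers): a dashed line whose ideal
                // task size is 6000x8 device pixels exceeds
                // MAX_LINE_DECORATION_RESOLUTION (4096), so it is cached at
                // 4096/6000 of that size (4096x6) and scaled back up when drawn.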

                // Request a pre-rendered image task.
                // TODO(gw): This match is a bit untidy, but it should disappear completely
                //           once the prepare_prims and batching are unified. When that
                //           happens, we can use the cache handle immediately, and not need
                //           to temporarily store it in the primitive instance.
                *render_task = Some(frame_state.resource_cache.request_render_task(
                    RenderTaskCacheKey {
                        size: task_size,
                        kind: RenderTaskCacheKeyKind::LineDecoration(cache_key.clone()),
                    },
                    frame_state.gpu_cache,
                    frame_state.rg_builder,
                    None,
                    false,
                    RenderTaskParent::Surface(pic_context.surface_index),
                    frame_state.surfaces,
                    |rg_builder| {
                        rg_builder.add().init(RenderTask::new_dynamic(
                            task_size,
                            RenderTaskKind::new_line_decoration(
                                cache_key.style,
                                cache_key.orientation,
                                cache_key.wavy_line_thickness.to_f32_px(),
                                LayoutSize::from_au(cache_key.size),
                            ),
                        ))
                    }
                ));
            }
        }
        PrimitiveInstanceKind::TextRun { run_index, data_handle, .. } => {
            profile_scope!("TextRun");
            let prim_data = &mut data_stores.text_run[*data_handle];
            let run = &mut store.text_runs[*run_index];

            prim_data.common.may_need_repetition = false;

            // The glyph transform has to match `glyph_transform` in "ps_text_run" shader.
            // It's relative to the rasterizing space of a glyph.
            let transform = frame_context.spatial_tree
                .get_relative_transform(
                    prim_spatial_node_index,
                    pic_context.raster_spatial_node_index,
                )
                .into_fast_transform();
            let prim_offset = prim_data.common.prim_rect.min.to_vector() - run.reference_frame_relative_offset;

            let pic = &store.pictures[pic_context.pic_index.0];
            let surface = &frame_state.surfaces[pic_context.surface_index.0];
            let root_scaling_factor = match pic.raster_config {
                Some(ref raster_config) => raster_config.root_scaling_factor,
                None => 1.0
            };

            // Disable subpixel AA if the backing surface the glyphs are drawn
            // onto doesn't support it (unless we are using the special
            // subpixel mode that estimates background color).
            let allow_subpixel = match prim_instance.vis.state {
                VisibilityState::Culled |
                VisibilityState::Unset |
                VisibilityState::Coarse { .. } |
                VisibilityState::PassThrough => {
                    panic!("bug: invalid visibility state");
                }
                VisibilityState::Detailed { ref filter, .. } => {
                    // For now, we only allow subpixel AA on primary sub-slices. In future we
                    // may support other sub-slices if we find content that does this.
                    if filter.sub_slice_index.is_primary() {
                        match pic_context.subpixel_mode {
                            SubpixelMode::Allow => true,
                            SubpixelMode::Deny => false,
                            SubpixelMode::Conditional { allowed_rect } => {
                                // Conditional mode allows subpixel AA to be enabled for this
                                // text run, so long as it's inside the allowed rect.
                                allowed_rect.contains_box(&prim_instance.vis.clip_chain.pic_clip_rect)
                            }
                        }
                    } else {
                        false
                    }
                }
            };

            run.request_resources(
                prim_offset,
                &prim_data.font,
                &prim_data.glyphs,
                &transform.to_transform().with_destination::<_>(),
                surface,
                prim_spatial_node_index,
                root_scaling_factor,
                allow_subpixel,
                frame_context.fb_config.low_quality_pinch_zoom,
                frame_state.resource_cache,
                frame_state.gpu_cache,
                frame_context.spatial_tree,
                scratch,
            );

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state);
        }
        PrimitiveInstanceKind::Clear { data_handle, .. } => {
            profile_scope!("Clear");
            let prim_data = &mut data_stores.prim[*data_handle];

            prim_data.common.may_need_repetition = false;

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state, frame_context.scene_properties);
        }
        PrimitiveInstanceKind::NormalBorder { data_handle, ref mut render_task_ids, .. } => {
            profile_scope!("NormalBorder");
            let prim_data = &mut data_stores.normal_border[*data_handle];
            let common_data = &mut prim_data.common;
            let border_data = &mut prim_data.kind;

            common_data.may_need_repetition =
                matches!(border_data.border.top.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
                matches!(border_data.border.right.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
                matches!(border_data.border.bottom.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
                matches!(border_data.border.left.style, BorderStyle::Dotted | BorderStyle::Dashed);

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            border_data.update(common_data, frame_state);

            // TODO(gw): For now, the scale factors to rasterize borders at are
            //           based on the true world transform of the primitive. When
            //           raster roots with local scale are supported in future,
            //           that will need to be accounted for here.
            let scale = frame_context
                .spatial_tree
                .get_world_transform(prim_spatial_node_index)
                .scale_factors();

            // Scale factors are normalized to a power of 2 to reduce the number of
            // resolution changes.
            // For frames with a changing scale transform, round scale factors up to
            // the nearest power-of-2 boundary so that we don't keep having to redraw
            // the content as it scales up and down. Rounding up to the nearest
            // power-of-2 boundary ensures we never scale up, only down, avoiding
            // jaggies. It also ensures we never scale down by more than a factor of
            // 2, avoiding bad downscaling quality.
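            // For example (assuming `clamp_to_scale_factor` rounds up to the
            // next power of two, per the comment above): a world scale of 0.7
            // is rasterized at 1.0, and a scale of 1.3 at 2.0, so cached
            // border segments are only ever sampled down.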
            let scale_width = clamp_to_scale_factor(scale.0, false);
            let scale_height = clamp_to_scale_factor(scale.1, false);
            // Pick the maximum dimension as scale
            let world_scale = LayoutToWorldScale::new(scale_width.max(scale_height));
            let mut scale = world_scale * device_pixel_scale;
            let max_scale = get_max_scale_for_border(border_data);
            scale.0 = scale.0.min(max_scale.0);

            // For each edge and corner, request the render task by content key
            // from the render task cache. This ensures that the render task for
            // this segment will be available for batching later in the frame.
            let mut handles: SmallVec<[RenderTaskId; 8]> = SmallVec::new();

            for segment in &border_data.border_segments {
                // Update the cache key device size based on requested scale.
                let cache_size = to_cache_size(segment.local_task_size, &mut scale);
                let cache_key = RenderTaskCacheKey {
                    kind: RenderTaskCacheKeyKind::BorderSegment(segment.cache_key.clone()),
                    size: cache_size,
                };

                handles.push(frame_state.resource_cache.request_render_task(
                    cache_key,
                    frame_state.gpu_cache,
                    frame_state.rg_builder,
                    None,
                    false,          // TODO(gw): We don't calculate opacity for borders yet!
                    RenderTaskParent::Surface(pic_context.surface_index),
                    frame_state.surfaces,
                    |rg_builder| {
                        rg_builder.add().init(RenderTask::new_dynamic(
                            cache_size,
                            RenderTaskKind::new_border_segment(
                                build_border_instances(
                                    &segment.cache_key,
                                    cache_size,
                                    &border_data.border,
                                    scale,
                                )
                            ),
                        ))
                    }
                ));
            }

            *render_task_ids = scratch
                .border_cache_handles
                .extend(handles);
        }
        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
            profile_scope!("ImageBorder");
            let prim_data = &mut data_stores.image_border[*data_handle];

            // TODO: get access to the ninepatch to check whether we need
            // support for repetitions in the shader.

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.kind.update(
                &mut prim_data.common,
                frame_state
            );
        }
        PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, color_binding_index, .. } => {
            profile_scope!("Rectangle");
            let prim_data = &mut data_stores.prim[*data_handle];
            prim_data.common.may_need_repetition = false;

            if *color_binding_index != ColorBindingIndex::INVALID {
                match store.color_bindings[*color_binding_index] {
                    PropertyBinding::Binding(..) => {
                        // We explicitly invalidate the gpu cache
                        // if the color is animating.
                        let gpu_cache_handle =
                            if *segment_instance_index == SegmentInstanceIndex::INVALID {
                                None
                            } else if *segment_instance_index == SegmentInstanceIndex::UNUSED {
                                Some(&prim_data.common.gpu_cache_handle)
                            } else {
                                Some(&scratch.segment_instances[*segment_instance_index].gpu_cache_handle)
                            };
                        if let Some(gpu_cache_handle) = gpu_cache_handle {
                            frame_state.gpu_cache.invalidate(gpu_cache_handle);
                        }
                    }
                    PropertyBinding::Value(..) => {},
                }
            }

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(
                frame_state,
                frame_context.scene_properties,
            );

            is_opaque = prim_data.common.opacity.is_opaque;

            write_segment(
                *segment_instance_index,
                frame_state,
                &mut scratch.segments,
                &mut scratch.segment_instances,
                |request| {
                    prim_data.kind.write_prim_gpu_blocks(
                        request,
                        frame_context.scene_properties,
                    );
                }
            );
        }
        PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, .. } => {
            profile_scope!("YuvImage");
            let prim_data = &mut data_stores.yuv_image[*data_handle];
            let common_data = &mut prim_data.common;
            let yuv_image_data = &mut prim_data.kind;
            is_opaque = true;

            common_data.may_need_repetition = false;

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            yuv_image_data.update(common_data, frame_state);

            write_segment(
                *segment_instance_index,
                frame_state,
                &mut scratch.segments,
                &mut scratch.segment_instances,
                |request| {
                    yuv_image_data.write_prim_gpu_blocks(request);
                }
            );
        }
        PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
            profile_scope!("Image");

            let prim_data = &mut data_stores.image[*data_handle];
            let common_data = &mut prim_data.common;
            let image_data = &mut prim_data.kind;
            let image_instance = &mut store.images[*image_instance_index];

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            image_data.update(
                common_data,
                image_instance,
                pic_context.surface_index,
                prim_spatial_node_index,
                frame_state,
                frame_context,
                &mut prim_instance.vis,
            );

            // common_data.opacity.is_opaque is computed in the above update call.
            is_opaque = common_data.opacity.is_opaque;

            write_segment(
                image_instance.segment_instance_index,
                frame_state,
                &mut scratch.segments,
                &mut scratch.segment_instances,
                |request| {
                    image_data.write_prim_gpu_blocks(request);
                },
            );
        }
        PrimitiveInstanceKind::LinearGradient { data_handle, ref mut visible_tiles_range, .. } => {
            profile_scope!("LinearGradient");
            let prim_data = &mut data_stores.linear_grad[*data_handle];

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state, pic_context.surface_index);

            if prim_data.stretch_size.width >= prim_data.common.prim_rect.width() &&
                prim_data.stretch_size.height >= prim_data.common.prim_rect.height() {

                prim_data.common.may_need_repetition = false;
            }

            if prim_data.tile_spacing != LayoutSize::zero() {
                // We are performing the decomposition on the CPU here, no need to
                // have it in the shader.
                prim_data.common.may_need_repetition = false;

                *visible_tiles_range = decompose_repeated_gradient(
                    &prim_instance.vis,
                    &prim_data.common.prim_rect,
                    prim_spatial_node_index,
                    &prim_data.stretch_size,
                    &prim_data.tile_spacing,
                    frame_state,
                    &mut scratch.gradient_tiles,
                    &frame_context.spatial_tree,
                    Some(&mut |_, mut request| {
                        request.push([
                            prim_data.start_point.x,
                            prim_data.start_point.y,
                            prim_data.end_point.x,
                            prim_data.end_point.y,
                        ]);
                        request.push([
                            pack_as_float(prim_data.extend_mode as u32),
                            prim_data.stretch_size.width,
                            prim_data.stretch_size.height,
                            0.0,
                        ]);
                    }),
                );

                if visible_tiles_range.is_empty() {
                    prim_instance.clear_visibility();
                }
            }

            // TODO(gw): Consider whether it's worth doing segment building
            //           for gradient primitives.
        }
        PrimitiveInstanceKind::CachedLinearGradient { data_handle, ref mut visible_tiles_range, .. } => {
            profile_scope!("CachedLinearGradient");
            let prim_data = &mut data_stores.linear_grad[*data_handle];
            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state, pic_context.surface_index);

            if prim_data.tile_spacing != LayoutSize::zero() {
                prim_data.common.may_need_repetition = false;

                *visible_tiles_range = decompose_repeated_gradient(
                    &prim_instance.vis,
                    &prim_data.common.prim_rect,
                    prim_spatial_node_index,
                    &prim_data.stretch_size,
                    &prim_data.tile_spacing,
                    frame_state,
                    &mut scratch.gradient_tiles,
                    &frame_context.spatial_tree,
                    None,
                );

                if visible_tiles_range.is_empty() {
                    prim_instance.clear_visibility();
                }
            }
        }
        PrimitiveInstanceKind::RadialGradient { data_handle, ref mut visible_tiles_range, .. } => {
            profile_scope!("RadialGradient");
            let prim_data = &mut data_stores.radial_grad[*data_handle];

            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state, pic_context.surface_index);

            if prim_data.tile_spacing != LayoutSize::zero() {
                prim_data.common.may_need_repetition = false;

                *visible_tiles_range = decompose_repeated_gradient(
                    &prim_instance.vis,
                    &prim_data.common.prim_rect,
                    prim_spatial_node_index,
                    &prim_data.stretch_size,
                    &prim_data.tile_spacing,
                    frame_state,
                    &mut scratch.gradient_tiles,
                    &frame_context.spatial_tree,
                    None,
                );

                if visible_tiles_range.is_empty() {
                    prim_instance.clear_visibility();
                }
            }

            // TODO(gw): Consider whether it's worth doing segment building
            //           for gradient primitives.
        }
        PrimitiveInstanceKind::ConicGradient { data_handle, ref mut visible_tiles_range, .. } => {
            profile_scope!("ConicGradient");
            let prim_data = &mut data_stores.conic_grad[*data_handle];

            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();

            // Update the template this instance references, which may refresh the GPU
            // cache with any shared template data.
            prim_data.update(frame_state, pic_context.surface_index);

            if prim_data.tile_spacing != LayoutSize::zero() {
                prim_data.common.may_need_repetition = false;

                *visible_tiles_range = decompose_repeated_gradient(
                    &prim_instance.vis,
                    &prim_data.common.prim_rect,
                    prim_spatial_node_index,
                    &prim_data.stretch_size,
                    &prim_data.tile_spacing,
                    frame_state,
                    &mut scratch.gradient_tiles,
                    &frame_context.spatial_tree,
                    None,
                );

                if visible_tiles_range.is_empty() {
                    prim_instance.clear_visibility();
                }
            }

            // TODO(gw): Consider whether it's worth doing segment building
            //           for gradient primitives.
        }
        PrimitiveInstanceKind::Picture { pic_index, segment_instance_index, .. } => {
            profile_scope!("Picture");
            let pic = &mut store.pictures[pic_index.0];

            if pic.prepare_for_render(
                frame_context,
                frame_state,
                data_stores,
            ) {
                if let Some(ref mut splitter) = pic_state.plane_splitter {
                    PicturePrimitive::add_split_plane(
                        splitter,
                        frame_context.spatial_tree,
                        prim_spatial_node_index,
                        pic.precise_local_rect,
                        &prim_instance.vis.combined_local_clip_rect,
                        frame_state.current_dirty_region().combined,
                        plane_split_anchor,
                    );
                }

                // If this picture uses segments, ensure the GPU cache is
                // up to date with segment local rects.
                // TODO(gw): This entire match statement above can now be
                //           refactored into prepare_interned_prim_for_render.
                if pic.can_use_segments() {
                    write_segment(
                        *segment_instance_index,
                        frame_state,
                        &mut scratch.segments,
                        &mut scratch.segment_instances,
                        |request| {
                            request.push(PremultipliedColorF::WHITE);
                            request.push(PremultipliedColorF::WHITE);
                            request.push([
                                -1.0,       // -ve means use prim rect for stretch size
                                0.0,
                                0.0,
                                0.0,
                            ]);
                        }
                    );
                }
            } else {
                prim_instance.clear_visibility();
            }
        }
        PrimitiveInstanceKind::Backdrop { data_handle } => {
            profile_scope!("Backdrop");
            let backdrop_pic_index = data_stores.backdrop[*data_handle].kind.pic_index;

            // Setup a dependency on the backdrop picture to ensure it is rendered prior to rendering this primitive.
            let backdrop_surface_index = store.pictures[backdrop_pic_index.0].raster_config.as_ref().unwrap().surface_index;
            if let Some(ref backdrop_tasks) = frame_state.surfaces[backdrop_surface_index.0].render_tasks {
                // This is untidy / code duplication but matches existing behavior and will be
                // removed in follow up patches to this bug to rework how backdrop-filter works.
                let backdrop_task_id = match backdrop_tasks {
                    SurfaceRenderTasks::Tiled(..) => unreachable!(),
                    SurfaceRenderTasks::Simple(id) => *id,
                    SurfaceRenderTasks::Chained { port_task_id, .. } => *port_task_id,
                };

                frame_state.add_child_render_task(
                    pic_context.surface_index,
                    backdrop_task_id,
                );
            } else {
                if prim_instance.is_chased() {
                    println!("\tBackdrop primitive culled because backdrop task was not assigned render tasks");
                }
                prim_instance.clear_visibility();
            }
        }
    };

    // If the primitive is opaque, see if it can contribute to its picture
    // surface's opaque rect.
    is_opaque = is_opaque && {
        let clip = prim_instance.vis.clip_task_index;
        clip == ClipTaskIndex::INVALID
    };

    is_opaque = is_opaque && !frame_context.spatial_tree.is_relative_transform_complex(
        prim_spatial_node_index,
        pic_context.raster_spatial_node_index,
    );

    if is_opaque {
        let prim_local_rect = data_stores.get_local_prim_rect(
            prim_instance,
            store,
        );
        cluster.opaque_rect = crate::util::conservative_union_rect(&cluster.opaque_rect, &prim_local_rect);
    }
}

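/// If the primitive uses segments, write the per-primitive GPU blocks (via
/// the supplied callback) followed by one GPU block per segment. No-op when
/// the segment instance is marked UNUSED.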
fn write_segment<F>(
    segment_instance_index: SegmentInstanceIndex,
    frame_state: &mut FrameBuildingState,
    segments: &mut SegmentStorage,
    segment_instances: &mut SegmentInstanceStorage,
    f: F,
) where F: Fn(&mut GpuDataRequest) {
    debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID);
    if segment_instance_index != SegmentInstanceIndex::UNUSED {
        let segment_instance = &mut segment_instances[segment_instance_index];

        if let Some(mut request) = frame_state.gpu_cache.request(&mut segment_instance.gpu_cache_handle) {
            let segments = &segments[segment_instance.segments_range];

            f(&mut request);

            for segment in segments {
                request.write_segment(
                    segment.local_rect,
                    [0.0; 4],
                );
            }
        }
    }
}

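/// Decompose a repeated gradient into per-tile primitives on the CPU,
/// requesting a GPU cache handle for each visible repetition. Returns an
/// empty range if no repetitions are visible, which callers treat as culled.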
fn decompose_repeated_gradient(
    prim_vis: &PrimitiveVisibility,
    prim_local_rect: &LayoutRect,
    prim_spatial_node_index: SpatialNodeIndex,
    stretch_size: &LayoutSize,
    tile_spacing: &LayoutSize,
    frame_state: &mut FrameBuildingState,
    gradient_tiles: &mut GradientTileStorage,
    spatial_tree: &SpatialTree,
    mut callback: Option<&mut dyn FnMut(&LayoutRect, GpuDataRequest)>,
) -> GradientTileRange {
    let mut visible_tiles = Vec::new();

    // Tighten the clip rect because decomposing the repeated image can
    // produce primitives that are partially covering the original image
    // rect and we want to clip these extra parts out.
    let tight_clip_rect = prim_vis
        .combined_local_clip_rect
        .intersection(prim_local_rect).unwrap();

    let visible_rect = compute_conservative_visible_rect(
        &prim_vis.clip_chain,
        frame_state.current_dirty_region().combined,
        prim_spatial_node_index,
        spatial_tree,
    );
    let stride = *stretch_size + *tile_spacing;

    let repetitions = image_tiling::repetitions(prim_local_rect, &visible_rect, stride);
    for Repetition { origin, .. } in repetitions {
        let mut handle = GpuCacheHandle::new();
        let rect = LayoutRect::from_origin_and_size(
            origin,
            *stretch_size,
        );

        if let Some(callback) = &mut callback {
            if let Some(request) = frame_state.gpu_cache.request(&mut handle) {
                callback(&rect, request);
            }
        }

        visible_tiles.push(VisibleGradientTile {
            local_rect: rect,
            local_clip_rect: tight_clip_rect,
            handle
        });
    }

    // If we have no tiles to show at this point, we could probably have done
    // a better job of culling during an earlier stage.
    // Returning an empty range has the effect of "culling out" the primitive
    // from the point of view of the batch builder, and ensures we don't hit
    // assertions later on because we didn't request any image.
    if visible_tiles.is_empty() {
        GradientTileRange::empty()
    } else {
        gradient_tiles.extend(visible_tiles)
    }
}

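/// Update the clip task for a brush primitive that can be drawn as a set of
/// segments. Returns None if the primitive kind doesn't support segments (or
/// has none), in which case the caller falls back to a single clip mask.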
fn update_clip_task_for_brush(
    instance: &PrimitiveInstance,
    prim_origin: &LayoutPoint,
    prim_spatial_node_index: SpatialNodeIndex,
    root_spatial_node_index: SpatialNodeIndex,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    prim_store: &PrimitiveStore,
    data_stores: &mut DataStores,
    segments_store: &mut SegmentStorage,
    segment_instances_store: &mut SegmentInstanceStorage,
    clip_mask_instances: &mut Vec<ClipMaskKind>,
    unclipped: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> Option<ClipTaskIndex> {
    let segments = match instance.kind {
        PrimitiveInstanceKind::TextRun { .. } |
        PrimitiveInstanceKind::Clear { .. } |
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::Backdrop { .. } => {
            return None;
        }
        PrimitiveInstanceKind::Image { image_instance_index, .. } => {
            let segment_instance_index = prim_store
                .images[image_instance_index]
                .segment_instance_index;

            if segment_instance_index == SegmentInstanceIndex::UNUSED {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];

            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::Picture { segment_instance_index, .. } => {
            // Pictures may not support segment rendering at all (INVALID)
            // or support segment rendering but choose not to due to size
            // or some other factor (UNUSED).
            if segment_instance_index == SegmentInstanceIndex::UNUSED ||
               segment_instance_index == SegmentInstanceIndex::INVALID {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];
            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::YuvImage { segment_instance_index, .. } |
        PrimitiveInstanceKind::Rectangle { segment_instance_index, .. } => {
            debug_assert!(segment_instance_index != SegmentInstanceIndex::INVALID);

            if segment_instance_index == SegmentInstanceIndex::UNUSED {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];

            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
            let border_data = &data_stores.image_border[data_handle].kind;

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            border_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::NormalBorder { data_handle, .. } => {
            let border_data = &data_stores.normal_border[data_handle].kind;

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            border_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::LinearGradient { data_handle, .. }
        | PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
            let prim_data = &data_stores.linear_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
            let prim_data = &data_stores.radial_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
            let prim_data = &data_stores.conic_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
    };

    // If there are no segments, early out to avoid setting a valid
    // clip task instance location below.
    if segments.is_empty() {
        return None;
    }

    // Set where in the clip mask instances array the clip mask info
    // can be found for this primitive. Each segment will push the
    // clip mask information for itself in update_clip_task below.
    let clip_task_index = ClipTaskIndex(clip_mask_instances.len() as _);

    // If we only built 1 segment, there is no point in re-running
    // the clip chain builder. Instead, just use the clip chain
    // instance that was built for the main primitive. This is a
    // significant optimization for the common case.
    if segments.len() == 1 {
        let clip_mask_kind = update_brush_segment_clip_task(
            &segments[0],
            Some(&instance.vis.clip_chain),
            frame_state.current_dirty_region().combined,
            root_spatial_node_index,
            pic_context.surface_index,
            pic_state,
            frame_context,
            frame_state,
            &mut data_stores.clip,
            unclipped,
            device_pixel_scale,
        );
        clip_mask_instances.push(clip_mask_kind);
    } else {
        let dirty_world_rect = frame_state.current_dirty_region().combined;

        for segment in segments {
            // Build a clip chain for the smaller segment rect. This will
            // often manage to eliminate most/all clips, and sometimes
            // clip the segment completely.
            frame_state.clip_store.set_active_clips_from_clip_chain(
                &instance.vis.clip_chain,
                prim_spatial_node_index,
                &frame_context.spatial_tree,
            );

            let segment_clip_chain = frame_state
                .clip_store
                .build_clip_chain_instance(
                    segment.local_rect.translate(prim_origin.to_vector()),
                    &pic_state.map_local_to_pic,
                    &pic_state.map_pic_to_world,
                    &frame_context.spatial_tree,
                    frame_state.gpu_cache,
                    frame_state.resource_cache,
                    device_pixel_scale,
                    &dirty_world_rect,
                    &mut data_stores.clip,
                    false,
                    instance.is_chased(),
                );

            let clip_mask_kind = update_brush_segment_clip_task(
                &segment,
                segment_clip_chain.as_ref(),
                frame_state.current_dirty_region().combined,
                root_spatial_node_index,
                pic_context.surface_index,
                pic_state,
                frame_context,
                frame_state,
                &mut data_stores.clip,
                unclipped,
                device_pixel_scale,
            );
            clip_mask_instances.push(clip_mask_kind);
        }
    }

    Some(clip_task_index)
}

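/// Update the clip task(s) for a primitive: first try the segmented brush
/// path above; otherwise, if the clip chain needs a mask, allocate a single
/// clip mask render task clipped to the current dirty region. Returns false
/// if the primitive ends up completely clipped out.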
pub fn update_clip_task(
    instance: &mut PrimitiveInstance,
    prim_origin: &LayoutPoint,
    prim_spatial_node_index: SpatialNodeIndex,
    root_spatial_node_index: SpatialNodeIndex,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    prim_store: &mut PrimitiveStore,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
) -> bool {
    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;

    if instance.is_chased() {
        println!("\tupdating clip task with pic rect {:?}", instance.vis.clip_chain.pic_clip_rect);
    }

    // Get the device space rect for the primitive if it was unclipped.
    let unclipped = match get_unclipped_device_rect(
        instance.vis.clip_chain.pic_clip_rect,
        &pic_state.map_pic_to_raster,
        device_pixel_scale,
    ) {
        Some(rect) => rect,
        None => return false,
    };

    build_segments_if_needed(
        instance,
        frame_state,
        prim_store,
        data_stores,
        &mut scratch.segments,
        &mut scratch.segment_instances,
    );

    // First try to render this primitive's mask using optimized brush rendering.
    instance.vis.clip_task_index = if let Some(clip_task_index) = update_clip_task_for_brush(
        instance,
        prim_origin,
        prim_spatial_node_index,
        root_spatial_node_index,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        prim_store,
        data_stores,
        &mut scratch.segments,
        &mut scratch.segment_instances,
        &mut scratch.clip_mask_instances,
        &unclipped,
        device_pixel_scale,
    ) {
        if instance.is_chased() {
            println!("\tsegment tasks have been created for clipping: {:?}", clip_task_index);
        }
        clip_task_index
    } else if instance.vis.clip_chain.needs_mask {
        // Get a minimal device space rect, clipped to the screen, that we
        // need to allocate the clip mask for.
        let unadjusted_device_rect = match get_clipped_device_rect(
            &unclipped,
            &pic_state.map_raster_to_world,
            frame_state.current_dirty_region().combined,
            device_pixel_scale,
        ) {
            Some(device_rect) => device_rect,
            None => return false,
        };

        let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(
            unadjusted_device_rect,
            device_pixel_scale,
        );
        let clip_task_id = RenderTaskKind::new_mask(
            device_rect,
            instance.vis.clip_chain.clips_range,
            root_spatial_node_index,
            frame_state.clip_store,
            frame_state.gpu_cache,
            frame_state.resource_cache,
            frame_state.rg_builder,
            &mut data_stores.clip,
            device_pixel_scale,
            frame_context.fb_config,
            frame_state.surfaces,
        );
        if instance.is_chased() {
            println!("\tcreated task {:?} with device rect {:?}",
                clip_task_id, device_rect);
        }
        // Set the global clip mask instance for this primitive.
        let clip_task_index = ClipTaskIndex(scratch.clip_mask_instances.len() as _);
        scratch.clip_mask_instances.push(ClipMaskKind::Mask(clip_task_id));
        instance.vis.clip_task_index = clip_task_index;
        frame_state.add_child_render_task(
            pic_context.surface_index,
            clip_task_id,
        );
        clip_task_index
    } else {
        if instance.is_chased() {
            println!("\tno mask is needed");
        }
        ClipTaskIndex::INVALID
    };

    true
}

/// Write out to the clip mask instances array the correct clip mask
/// config for this segment.
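/// Returns `ClipMaskKind::None` if no mask is needed, `ClipMaskKind::Clipped`
/// if the segment is clipped out entirely, or `ClipMaskKind::Mask` with the
/// id of the newly allocated mask render task.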
pub fn update_brush_segment_clip_task(
    segment: &BrushSegment,
    clip_chain: Option<&ClipChainInstance>,
    world_clip_rect: WorldRect,
    root_spatial_node_index: SpatialNodeIndex,
    surface_index: SurfaceIndex,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    clip_data_store: &mut ClipDataStore,
    unclipped: &DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> ClipMaskKind {
    let clip_chain = match clip_chain {
        Some(chain) => chain,
        None => return ClipMaskKind::Clipped,
    };
    if !clip_chain.needs_mask ||
       (!segment.may_need_clip_mask && !clip_chain.has_non_local_clips) {
        return ClipMaskKind::None;
    }

    let segment_world_rect = match pic_state.map_pic_to_world.map(&clip_chain.pic_clip_rect) {
        Some(rect) => rect,
        None => return ClipMaskKind::Clipped,
    };

    let segment_world_rect = match segment_world_rect.intersection(&world_clip_rect) {
        Some(rect) => rect,
        None => return ClipMaskKind::Clipped,
    };

    // Get a minimal device space rect, clipped to the screen, that we
    // need to allocate the clip mask for.
    let device_rect = match get_clipped_device_rect(
        unclipped,
        &pic_state.map_raster_to_world,
        segment_world_rect,
        device_pixel_scale,
    ) {
        Some(info) => info,
        None => {
            return ClipMaskKind::Clipped;
        }
    };

    let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(device_rect, device_pixel_scale);

    let clip_task_id = RenderTaskKind::new_mask(
        device_rect,
        clip_chain.clips_range,
        root_spatial_node_index,
        frame_state.clip_store,
        frame_state.gpu_cache,
        frame_state.resource_cache,
        frame_state.rg_builder,
        clip_data_store,
        device_pixel_scale,
        frame_context.fb_config,
        frame_state.surfaces,
    );

    frame_state.add_child_render_task(
        surface_index,
        clip_task_id,
    );
    ClipMaskKind::Mask(clip_task_id)
}

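/// Walk the clip chain for a primitive and feed its local-space rectangle,
/// rounded-rectangle and box-shadow clips into the segment builder. Returns
/// false if the primitive should be drawn unsegmented, either because it is
/// too small to benefit or because an image mask makes segmentation
/// impossible.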
fn write_brush_segment_description(
    prim_local_rect: LayoutRect,
    prim_local_clip_rect: LayoutRect,
    clip_chain: &ClipChainInstance,
    segment_builder: &mut SegmentBuilder,
    clip_store: &ClipStore,
    data_stores: &DataStores,
) -> bool {
    // If the brush is small, we want to skip building segments
    // and just draw it as a single primitive with clip mask.
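    // (With MIN_BRUSH_SPLIT_AREA at 128.0 * 128.0 = 16384.0, a 100x100
    // local rect, for example, has area 10000 and takes this early-out path.)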
    if prim_local_rect.area() < MIN_BRUSH_SPLIT_AREA {
        return false;
    }

    segment_builder.initialize(
        prim_local_rect,
        None,
        prim_local_clip_rect
    );

    // Segment the primitive on all the local-space clip sources that we can.
    for i in 0 .. clip_chain.clips_range.count {
        let clip_instance = clip_store
            .get_instance_from_range(&clip_chain.clips_range, i);
        let clip_node = &data_stores.clip[clip_instance.handle];

        // If this clip item is positioned by another positioning node, its relative position
        // could change during scrolling. This means that we would need to resegment. Instead
        // of doing that, only segment with clips that have the same positioning node.
        // TODO(mrobinson, #2858): It may make sense to include these nodes, resegmenting only
        // when necessary while scrolling.
        if !clip_instance.flags.contains(ClipNodeFlags::SAME_SPATIAL_NODE) {
            continue;
        }

        let (local_clip_rect, radius, mode) = match clip_node.item.kind {
            ClipItemKind::RoundedRectangle { rect, radius, mode } => {
                (rect, Some(radius), mode)
            }
            ClipItemKind::Rectangle { rect, mode } => {
                (rect, None, mode)
            }
            ClipItemKind::BoxShadow { ref source } => {
                // For inset box shadows, we can clip out any
                // pixels that are inside the shadow region
                // and are beyond the inner rect, as they can't
                // be affected by the blur radius.
                let inner_clip_mode = match source.clip_mode {
                    BoxShadowClipMode::Outset => None,
                    BoxShadowClipMode::Inset => Some(ClipMode::ClipOut),
                };

                // Push a region into the segment builder where the
                // box-shadow can have an effect on the result. This
                // ensures clip-mask tasks get allocated for these
                // pixel regions, even if no other clips affect them.
                segment_builder.push_mask_region(
                    source.prim_shadow_rect,
                    source.prim_shadow_rect.inflate(
                        -0.5 * source.original_alloc_size.width,
                        -0.5 * source.original_alloc_size.height,
                    ),
                    inner_clip_mode,
                );

                continue;
            }
            ClipItemKind::Image { .. } => {
                // If we encounter an image mask, bail out from segment building.
                // It's not possible to know which parts of the primitive are affected
                // by the mask (without inspecting the pixels). We could do something
                // better here in the future if it ever shows up as a performance issue
                // (for instance, at least segment based on the bounding rect of the
                // image mask if it's non-repeating).
                return false;
            }
        };

        segment_builder.push_clip_rect(local_clip_rect, radius, mode);
    }

    true
}

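/// Lazily build (or rebuild) the brush segments for a primitive instance,
/// if its kind supports segmentation and its current segment instance
/// index has been invalidated.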
fn build_segments_if_needed(
    instance: &mut PrimitiveInstance,
    frame_state: &mut FrameBuildingState,
    prim_store: &mut PrimitiveStore,
    data_stores: &DataStores,
    segments_store: &mut SegmentStorage,
    segment_instances_store: &mut SegmentInstanceStorage,
) {
    let prim_clip_chain = &instance.vis.clip_chain;

    // Usually, the primitive rect can be found from information
    // in the instance and primitive template.
    let prim_local_rect = data_stores.get_local_prim_rect(
        instance,
        prim_store,
    );

    let segment_instance_index = match instance.kind {
        PrimitiveInstanceKind::Rectangle { ref mut segment_instance_index, .. } |
        PrimitiveInstanceKind::YuvImage { ref mut segment_instance_index, .. } => {
            segment_instance_index
        }
        PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
            let image_data = &data_stores.image[data_handle].kind;
            let image_instance = &mut prim_store.images[image_instance_index];
            // Note: tiled images don't support automatic segmentation,
            // they strictly produce one segment per visible tile instead.
            if frame_state
                .resource_cache
                .get_image_properties(image_data.key)
                .and_then(|properties| properties.tiling)
                .is_some()
            {
                image_instance.segment_instance_index = SegmentInstanceIndex::UNUSED;
                return;
            }
            &mut image_instance.segment_instance_index
        }
        PrimitiveInstanceKind::Picture { ref mut segment_instance_index, pic_index, .. } => {
            let pic = &mut prim_store.pictures[pic_index.0];

            // If this picture supports segment rendering
            if pic.can_use_segments() {
                // If the segments have been invalidated, ensure the current
                // index of segments is invalid. This ensures that the segment
                // building logic below will be run.
                if !pic.segments_are_valid {
                    *segment_instance_index = SegmentInstanceIndex::INVALID;
                    pic.segments_are_valid = true;
                }

                segment_instance_index
            } else {
                return;
            }
        }
        PrimitiveInstanceKind::TextRun { .. } |
        PrimitiveInstanceKind::NormalBorder { .. } |
        PrimitiveInstanceKind::ImageBorder { .. } |
        PrimitiveInstanceKind::Clear { .. } |
        PrimitiveInstanceKind::LinearGradient { .. } |
        PrimitiveInstanceKind::CachedLinearGradient { .. } |
        PrimitiveInstanceKind::RadialGradient { .. } |
        PrimitiveInstanceKind::ConicGradient { .. } |
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::Backdrop { .. } => {
            // These primitives don't support / need segments.
            return;
        }
    };

    if *segment_instance_index == SegmentInstanceIndex::INVALID {
        let mut segments: SmallVec<[BrushSegment; 8]> = SmallVec::new();

        if write_brush_segment_description(
            prim_local_rect,
            instance.clip_set.local_clip_rect,
            prim_clip_chain,
            &mut frame_state.segment_builder,
            frame_state.clip_store,
            data_stores,
        ) {
            frame_state.segment_builder.build(|segment| {
                segments.push(
                    BrushSegment::new(
                        segment.rect.translate(-prim_local_rect.min.to_vector()),
                        segment.has_mask,
                        segment.edge_flags,
                        [0.0; 4],
                        BrushFlags::PERSPECTIVE_INTERPOLATION,
                    ),
                );
            });
        }

        // If only a single segment is produced, there is no benefit to writing
        // a segment instance array. Instead, just use the main primitive rect
        // written into the GPU cache.
        // TODO(gw): This is (sort of) a band-aid - due to a limitation in the current
        //           brush encoding, we can only support a total of up to 2^16 segments.
        //           This should be (more than) enough for any real world case, so for
        //           now we can handle this by skipping cases where we were generating
        //           segments where there is no benefit. The long term / robust fix
        //           for this is to move the segment building to be done as a more
        //           limited nine-patch system during scene building, removing arbitrary
        //           segmentation during frame-building (see bug #1617491).
        if segments.len() <= 1 {
            *segment_instance_index = SegmentInstanceIndex::UNUSED;
        } else {
            let segments_range = segments_store.extend(segments);

            let instance = SegmentedInstance {
                segments_range,
                gpu_cache_handle: GpuCacheHandle::new(),
            };

            *segment_instance_index = segment_instances_store.push(instance);
        }
    }
}

/// Retrieve the exact unsnapped device space rectangle for a primitive.
fn get_unclipped_device_rect(
    prim_rect: PictureRect,
    map_to_raster: &SpaceMapper<PicturePixel, RasterPixel>,
    device_pixel_scale: DevicePixelScale,
) -> Option<DeviceRect> {
    let raster_rect = map_to_raster.map(&prim_rect)?;
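    // Raster space and world space are assumed to be identical here, so
    // multiplying by a scale of 1.0 only converts the rect's unit type.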
    let world_rect = raster_rect * Scale::new(1.0);
    Some(world_rect * device_pixel_scale)
}

/// Given an unclipped device rect, try to find a minimal device space
/// rect to allocate a clip mask for, by clipping to the screen. This
/// function is very similar to picture::get_raster_rects. It is far from
/// ideal, and should be refactored as part of the support for setting
/// scale per-raster-root.
fn get_clipped_device_rect(
    unclipped: &DeviceRect,
    map_to_world: &SpaceMapper<RasterPixel, WorldPixel>,
    world_clip_rect: WorldRect,
    device_pixel_scale: DevicePixelScale,
) -> Option<DeviceRect> {
    let unclipped_raster_rect = {
        let world_rect = (*unclipped) * Scale::new(1.0);
        let raster_rect = world_rect * device_pixel_scale.inverse();

        raster_rect.cast_unit()
    };

    let unclipped_world_rect = map_to_world.map(&unclipped_raster_rect)?;

    let clipped_world_rect = unclipped_world_rect.intersection(&world_clip_rect)?;

    let clipped_raster_rect = map_to_world.unmap(&clipped_world_rect)?;

    let clipped_raster_rect = clipped_raster_rect.intersection(&unclipped_raster_rect)?;

    // Ensure that we won't try to allocate a zero-sized clip render task.
    if clipped_raster_rect.is_empty() {
        return None;
    }

    let clipped = raster_rect_to_device_pixels(
        clipped_raster_rect,
        device_pixel_scale,
    );

    Some(clipped)
}


// Ensures that the size of mask render tasks is within MAX_MASK_SIZE.
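// For example, with MAX_MASK_SIZE at 4096.0, an 8192x512 device rect gets a
// scale of (4096 - 1) / 8192, shrinking it to roughly 4095x256, and the
// device pixel scale is reduced by the same factor to match.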
fn adjust_mask_scale_for_max_size(
    device_rect: DeviceRect,
    device_pixel_scale: DevicePixelScale,
) -> (DeviceRect, DevicePixelScale) {
    if device_rect.width() > MAX_MASK_SIZE || device_rect.height() > MAX_MASK_SIZE {
        // round_out will grow by 1 integer pixel if origin is on a
        // fractional position, so keep that margin for error with -1:
        let scale = (MAX_MASK_SIZE - 1.0) /
            f32::max(device_rect.width(), device_rect.height());
        let new_device_pixel_scale = device_pixel_scale * Scale::new(scale);
        let new_device_rect = (device_rect.to_f32() * Scale::new(scale))
            .round_out();
        (new_device_rect, new_device_pixel_scale)
    } else {
        (device_rect, device_pixel_scale)
    }
}