1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 use api::{AlphaType, ClipMode, ExternalImageType, ImageRendering, EdgeAaSegmentMask};
6 use api::{YuvColorSpace, YuvFormat, ColorDepth, ColorRange, PremultipliedColorF};
7 use api::units::*;
8 use crate::clip::{ClipDataStore, ClipNodeFlags, ClipNodeRange, ClipItemKind, ClipStore};
9 use crate::spatial_tree::{SpatialTree, ROOT_SPATIAL_NODE_INDEX, SpatialNodeIndex, CoordinateSystemId};
10 use crate::composite::{CompositeState};
11 use crate::glyph_rasterizer::GlyphFormat;
12 use crate::gpu_cache::{GpuBlockData, GpuCache, GpuCacheHandle, GpuCacheAddress};
13 use crate::gpu_types::{BrushFlags, BrushInstance, PrimitiveHeaders, ZBufferId, ZBufferIdGenerator};
14 use crate::gpu_types::{ClipMaskInstance, SplitCompositeInstance, BrushShaderKind};
15 use crate::gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance};
16 use crate::gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette};
17 use crate::gpu_types::{ImageBrushData, get_shader_opacity};
18 use crate::internal_types::{FastHashMap, SavedTargetIndex, Swizzle, TextureSource, Filter};
19 use crate::picture::{Picture3DContext, PictureCompositeMode, PicturePrimitive};
20 use crate::prim_store::{DeferredResolve, PrimitiveInstanceKind, PrimitiveVisibilityIndex, PrimitiveVisibilityMask};
21 use crate::prim_store::{VisibleGradientTile, PrimitiveInstance, PrimitiveOpacity, SegmentInstanceIndex};
22 use crate::prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex, PrimitiveVisibility, PrimitiveVisibilityFlags};
23 use crate::prim_store::{VECS_PER_SEGMENT, SpaceMapper};
24 use crate::prim_store::image::ImageSource;
25 use crate::render_target::RenderTargetContext;
26 use crate::render_task_graph::{RenderTaskId, RenderTaskGraph};
27 use crate::render_task::RenderTaskAddress;
28 use crate::renderer::{BlendMode, ImageBufferKind, ShaderColorMode};
29 use crate::renderer::{BLOCKS_PER_UV_RECT, MAX_VERTEX_TEXTURE_WIDTH};
30 use crate::resource_cache::{CacheItem, GlyphFetchResult, ImageRequest, ResourceCache};
31 use smallvec::SmallVec;
32 use std::{f32, i32, usize};
33 use crate::util::{project_rect, TransformedRectKind};
34
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fff);

/// Used to signal there are no segments provided with this primitive.
/// Passed as the segment index of a `BrushInstance` when the primitive
/// is drawn as a single quad.
const INVALID_SEGMENT_INDEX: i32 = 0xffff;

/// Size in device pixels for tiles that clip masks are drawn in.
const CLIP_RECTANGLE_TILE_SIZE: i32 = 128;

/// The minimum size of a clip mask before trying to draw in tiles.
/// (Four tiles' worth of pixels.)
const CLIP_RECTANGLE_AREA_THRESHOLD: i32 = CLIP_RECTANGLE_TILE_SIZE * CLIP_RECTANGLE_TILE_SIZE * 4;
47
/// The batching discriminator for brush primitives. Each variant selects a
/// different brush shader (see `BatchKind::shader_kind`); batches with
/// different kinds can never be merged.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BrushBatchKind {
    Solid,
    Image(ImageBufferKind),
    Blend,
    /// Mix-blend compositing; carries the render tasks involved so the
    /// source and backdrop can be resolved at draw time.
    MixBlend {
        task_id: RenderTaskId,
        source_id: RenderTaskId,
        backdrop_id: RenderTaskId,
    },
    YuvImage(ImageBufferKind, YuvFormat, ColorDepth, YuvColorSpace, ColorRange),
    ConicGradient,
    RadialGradient,
    LinearGradient,
    Opacity,
}
66
/// Top-level batch discriminator: a batch draws either split composites,
/// text runs (per glyph format), or brushes (per `BrushBatchKind`).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BatchKind {
    SplitComposite,
    TextRun(GlyphFormat),
    Brush(BrushBatchKind),
}
75
76 impl BatchKind {
shader_kind(&self) -> BrushShaderKind77 fn shader_kind(&self) -> BrushShaderKind {
78 match self {
79 BatchKind::Brush(BrushBatchKind::Solid) => BrushShaderKind::Solid,
80 BatchKind::Brush(BrushBatchKind::Image(..)) => BrushShaderKind::Image,
81 BatchKind::Brush(BrushBatchKind::LinearGradient) => BrushShaderKind::LinearGradient,
82 BatchKind::Brush(BrushBatchKind::RadialGradient) => BrushShaderKind::RadialGradient,
83 BatchKind::Brush(BrushBatchKind::ConicGradient) => BrushShaderKind::ConicGradient,
84 BatchKind::Brush(BrushBatchKind::Blend) => BrushShaderKind::Blend,
85 BatchKind::Brush(BrushBatchKind::MixBlend { .. }) => BrushShaderKind::MixBlend,
86 BatchKind::Brush(BrushBatchKind::YuvImage(..)) => BrushShaderKind::Yuv,
87 BatchKind::Brush(BrushBatchKind::Opacity) => BrushShaderKind::Opacity,
88 BatchKind::TextRun(..) => BrushShaderKind::Text,
89 _ => BrushShaderKind::None,
90 }
91 }
92 }
93
/// Optional textures that can be used as a source in the shaders.
/// Textures that are not used by the batch are equal to TextureId::invalid().
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchTextures {
    /// One source per color texture slot; unused slots hold
    /// `TextureSource::Invalid`.
    pub colors: [TextureSource; 3],
}
102
103 impl BatchTextures {
no_texture() -> Self104 pub fn no_texture() -> Self {
105 BatchTextures {
106 colors: [TextureSource::Invalid; 3],
107 }
108 }
109
render_target_cache() -> Self110 pub fn render_target_cache() -> Self {
111 BatchTextures {
112 colors: [
113 TextureSource::PrevPassColor,
114 TextureSource::PrevPassAlpha,
115 TextureSource::Invalid,
116 ],
117 }
118 }
119
color(texture: TextureSource) -> Self120 pub fn color(texture: TextureSource) -> Self {
121 BatchTextures {
122 colors: [texture, texture, TextureSource::Invalid],
123 }
124 }
125
is_compatible_with(&self, other: &BatchTextures) -> bool126 pub fn is_compatible_with(&self, other: &BatchTextures) -> bool {
127 self.colors.iter().zip(other.colors.iter()).all(|(t1, t2)| textures_compatible(*t1, *t2))
128 }
129
combine_textures(&self, other: BatchTextures) -> Option<BatchTextures>130 pub fn combine_textures(&self, other: BatchTextures) -> Option<BatchTextures> {
131 if !self.is_compatible_with(&other) {
132 return None;
133 }
134
135 let mut new_textures = BatchTextures::no_texture();
136 for (i, (color, other_color)) in self.colors.iter().zip(other.colors.iter()).enumerate() {
137 // If these textures are compatible, for each source either both sources are invalid or only one is not invalid.
138 new_textures.colors[i] = if *color == TextureSource::Invalid {
139 *other_color
140 } else {
141 *color
142 };
143 }
144 Some(new_textures)
145 }
146 }
147
/// The key that uniquely identifies which batch an instance may be merged
/// into: the shader (kind), the blend mode, and the bound textures.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchKey {
    pub kind: BatchKind,
    pub blend_mode: BlendMode,
    pub textures: BatchTextures,
}
156
157 impl BatchKey {
new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self158 pub fn new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self {
159 BatchKey {
160 kind,
161 blend_mode,
162 textures,
163 }
164 }
165
is_compatible_with(&self, other: &BatchKey) -> bool166 pub fn is_compatible_with(&self, other: &BatchKey) -> bool {
167 self.kind == other.kind && self.blend_mode == other.blend_mode && self.textures.is_compatible_with(&other.textures)
168 }
169 }
170
171 #[inline]
textures_compatible(t1: TextureSource, t2: TextureSource) -> bool172 fn textures_compatible(t1: TextureSource, t2: TextureSource) -> bool {
173 t1 == TextureSource::Invalid || t2 == TextureSource::Invalid || t1 == t2
174 }
175
/// A list of blended (alpha) batches, with the bookkeeping needed to merge
/// new instances into existing batches without changing draw order results.
pub struct AlphaBatchList {
    pub batches: Vec<PrimitiveBatch>,
    /// Per batch, the picture-space rects of every item added to it; used
    /// to detect overlaps when looking back for a compatible batch.
    pub item_rects: Vec<Vec<PictureRect>>,
    /// Index of the batch most recently written to, or `usize::MAX` when
    /// there is no current batch.
    current_batch_index: usize,
    /// Z id of the last primitive added; a different z id forces the batch
    /// lookup to run again.
    current_z_id: ZBufferId,
    /// When true, `BlendMode::Advanced` primitives never merge into
    /// earlier batches.
    break_advanced_blend_batches: bool,
    /// Maximum number of most-recent batches scanned during lookup.
    lookback_count: usize,
}
184
impl AlphaBatchList {
    /// Construct an empty list. `lookback_count` bounds how many recent
    /// batches are scanned when searching for a compatible batch.
    fn new(break_advanced_blend_batches: bool, lookback_count: usize) -> Self {
        AlphaBatchList {
            batches: Vec::new(),
            item_rects: Vec::new(),
            current_z_id: ZBufferId::invalid(),
            current_batch_index: usize::MAX,
            break_advanced_blend_batches,
            lookback_count,
        }
    }

    /// Clear all current batches in this list. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.current_batch_index = usize::MAX;
        self.current_z_id = ZBufferId::invalid();
        self.batches.clear();
        self.item_rects.clear();
    }

    /// Return the instance buffer of a batch compatible with `key`,
    /// creating a new batch when no existing one can be safely reused.
    ///
    /// A later-created batch is drawn after earlier ones, so an instance
    /// may only be merged into an earlier batch if no batch *after* it
    /// contains an item whose rect overlaps `z_bounding_rect` — otherwise
    /// the merge would change what ends up on top.
    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        // The bounding box of everything at this Z plane. We expect potentially
        // multiple primitive segments coming with the same `z_id`.
        z_bounding_rect: &PictureRect,
        z_id: ZBufferId,
    ) -> &mut Vec<PrimitiveInstanceData> {
        // Fast path: another segment of the same primitive (same z id) going
        // into the same, still-compatible batch skips the lookup entirely.
        if z_id != self.current_z_id ||
           self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key)
        {
            let mut selected_batch_index = None;

            match key.blend_mode {
                BlendMode::SubpixelWithBgColor => {
                    'outer_multipass: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                        // Some subpixel batches are drawn in two passes. Because of this, we need
                        // to check for overlaps with every batch (which is a bit different
                        // than the normal batching below).
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(z_bounding_rect) {
                                break 'outer_multipass;
                            }
                        }

                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }
                    }
                }
                BlendMode::Advanced(_) if self.break_advanced_blend_batches => {
                    // don't try to find a batch
                }
                _ => {
                    'outer_default: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                        // For normal batches, we only need to check for overlaps for batches
                        // other than the first batch we consider. If the first batch
                        // is compatible, then we know there isn't any potential overlap
                        // issues to worry about.
                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }

                        // check for intersections
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(z_bounding_rect) {
                                break 'outer_default;
                            }
                        }
                    }
                }
            }

            // No safe candidate found: start a fresh batch at the end.
            if selected_batch_index.is_none() {
                let new_batch = PrimitiveBatch::new(key);
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
                self.item_rects.push(Vec::new());
            }

            self.current_batch_index = selected_batch_index.unwrap();
            self.item_rects[self.current_batch_index].push(*z_bounding_rect);
            self.current_z_id = z_id;
        } else if cfg!(debug_assertions) {
            // If it's a different segment of the same (larger) primitive, we expect the bounding box
            // to be the same - coming from the primitive itself, not the segment.
            assert_eq!(self.item_rects[self.current_batch_index].last(), Some(z_bounding_rect));
        }

        let batch = &mut self.batches[self.current_batch_index];
        batch.features |= features;

        &mut batch.instances
    }
}
286
/// A list of opaque batches. Because opaque items are z-tested, merge
/// decisions don't need overlap tracking like `AlphaBatchList` does.
pub struct OpaqueBatchList {
    /// Primitives with a larger pixel area than this only try to append to
    /// the most recent batch instead of looking back.
    pub pixel_area_threshold_for_new_batch: f32,
    pub batches: Vec<PrimitiveBatch>,
    /// Index of the batch most recently written to, or `usize::MAX` when
    /// there is no current batch.
    pub current_batch_index: usize,
    /// Maximum number of most-recent batches scanned during lookup.
    lookback_count: usize,
}
293
294 impl OpaqueBatchList {
new(pixel_area_threshold_for_new_batch: f32, lookback_count: usize) -> Self295 fn new(pixel_area_threshold_for_new_batch: f32, lookback_count: usize) -> Self {
296 OpaqueBatchList {
297 batches: Vec::new(),
298 pixel_area_threshold_for_new_batch,
299 current_batch_index: usize::MAX,
300 lookback_count,
301 }
302 }
303
304 /// Clear all current batches in this list. This is typically used
305 /// when a primitive is encountered that occludes all previous
306 /// content in this batch list.
clear(&mut self)307 fn clear(&mut self) {
308 self.current_batch_index = usize::MAX;
309 self.batches.clear();
310 }
311
set_params_and_get_batch( &mut self, key: BatchKey, features: BatchFeatures, z_bounding_rect: &PictureRect, ) -> &mut Vec<PrimitiveInstanceData>312 pub fn set_params_and_get_batch(
313 &mut self,
314 key: BatchKey,
315 features: BatchFeatures,
316 // The bounding box of everything at the current Z, whatever it is. We expect potentially
317 // multiple primitive segments produced by a primitive, which we allow to check
318 // `current_batch_index` instead of iterating the batches.
319 z_bounding_rect: &PictureRect,
320 ) -> &mut Vec<PrimitiveInstanceData> {
321 if self.current_batch_index == usize::MAX ||
322 !self.batches[self.current_batch_index].key.is_compatible_with(&key) {
323 let mut selected_batch_index = None;
324 let item_area = z_bounding_rect.size.area();
325
326 // If the area of this primitive is larger than the given threshold,
327 // then it is large enough to warrant breaking a batch for. In this
328 // case we just see if it can be added to the existing batch or
329 // create a new one.
330 if item_area > self.pixel_area_threshold_for_new_batch {
331 if let Some(batch) = self.batches.last() {
332 if batch.key.is_compatible_with(&key) {
333 selected_batch_index = Some(self.batches.len() - 1);
334 }
335 }
336 } else {
337 // Otherwise, look back through a reasonable number of batches.
338 for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
339 if batch.key.is_compatible_with(&key) {
340 selected_batch_index = Some(batch_index);
341 break;
342 }
343 }
344 }
345
346 if selected_batch_index.is_none() {
347 let new_batch = PrimitiveBatch::new(key);
348 selected_batch_index = Some(self.batches.len());
349 self.batches.push(new_batch);
350 }
351
352 self.current_batch_index = selected_batch_index.unwrap();
353 }
354
355 let batch = &mut self.batches[self.current_batch_index];
356 batch.features |= features;
357
358 &mut batch.instances
359 }
360
finalize(&mut self)361 fn finalize(&mut self) {
362 // Reverse the instance arrays in the opaque batches
363 // to get maximum z-buffer efficiency by drawing
364 // front-to-back.
365 // TODO(gw): Maybe we can change the batch code to
366 // build these in reverse and avoid having
367 // to reverse the instance array here.
368 for batch in &mut self.batches {
369 batch.instances.reverse();
370 }
371 }
372 }
373
/// A single draw batch: a key identifying shader/blend/textures, the
/// instance data to upload, and the union of features its items request.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveBatch {
    pub key: BatchKey,
    pub instances: Vec<PrimitiveInstanceData>,
    pub features: BatchFeatures,
}
381
bitflags! {
    /// Features of the batch that, if not requested, may allow a fast-path.
    ///
    /// Rather than breaking batches when primitives request different features,
    /// we always request the minimum amount of features to satisfy all items in
    /// the batch.
    /// The goal is to let the renderer optionally select more specialized
    /// versions of a shader if the batch doesn't require certain code paths.
    /// Not all shaders necessarily implement all of these features.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    #[cfg_attr(feature = "replay", derive(Deserialize))]
    pub struct BatchFeatures: u8 {
        const ALPHA_PASS = 1 << 0;
        const ANTIALIASING = 1 << 1;
        const REPETITION = 1 << 2;
    }
}
399
400 impl PrimitiveBatch {
new(key: BatchKey) -> PrimitiveBatch401 fn new(key: BatchKey) -> PrimitiveBatch {
402 PrimitiveBatch {
403 key,
404 instances: Vec::new(),
405 features: BatchFeatures::empty(),
406 }
407 }
408
merge(&mut self, other: PrimitiveBatch)409 fn merge(&mut self, other: PrimitiveBatch) {
410 self.instances.extend(other.instances);
411 self.features |= other.features;
412 }
413 }
414
/// The finished set of batches produced for one render task: the opaque
/// batches (drawn front-to-back) and alpha batches (drawn back-to-front).
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct AlphaBatchContainer {
    pub opaque_batches: Vec<PrimitiveBatch>,
    pub alpha_batches: Vec<PrimitiveBatch>,
    /// The overall scissor rect for this render task, if one
    /// is required.
    pub task_scissor_rect: Option<DeviceIntRect>,
    /// The rectangle of the owning render target that this
    /// set of batches affects.
    pub task_rect: DeviceIntRect,
}
427
impl AlphaBatchContainer {
    /// An empty container with a zero task rect; the rect grows as
    /// builders are merged in.
    pub fn new(
        task_scissor_rect: Option<DeviceIntRect>,
    ) -> AlphaBatchContainer {
        AlphaBatchContainer {
            opaque_batches: Vec::new(),
            alpha_batches: Vec::new(),
            task_scissor_rect,
            task_rect: DeviceIntRect::zero(),
        }
    }

    /// True when neither opaque nor alpha batches were produced.
    pub fn is_empty(&self) -> bool {
        self.opaque_batches.is_empty() &&
        self.alpha_batches.is_empty()
    }

    /// Fold a finished builder's batches into this container, growing the
    /// task rect to cover the builder's `task_rect`.
    fn merge(&mut self, builder: AlphaBatchBuilder, task_rect: &DeviceIntRect) {
        self.task_rect = self.task_rect.union(task_rect);

        // Opaque batches are z-tested, so an incoming batch may merge into
        // any compatible existing batch regardless of position.
        for other_batch in builder.opaque_batch_list.batches {
            let batch_index = self.opaque_batches.iter().position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    self.opaque_batches[batch_index].merge(other_batch);
                }
                None => {
                    self.opaque_batches.push(other_batch);
                }
            }
        }

        // Alpha batches are order dependent: `min_batch_index` only ever
        // moves forward, so each incoming batch merges at or after the
        // position of the previously merged one, preserving the builder's
        // relative draw order.
        let mut min_batch_index = 0;

        for other_batch in builder.alpha_batch_list.batches {
            let batch_index = self.alpha_batches.iter().skip(min_batch_index).position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    // `position` is relative to the skipped prefix.
                    let index = batch_index + min_batch_index;
                    self.alpha_batches[index].merge(other_batch);
                    min_batch_index = index;
                }
                None => {
                    self.alpha_batches.push(other_batch);
                    min_batch_index = self.alpha_batches.len();
                }
            }
        }
    }
}
484
/// Each segment can optionally specify a per-segment
/// texture set and one user data field.
#[derive(Debug, Copy, Clone)]
struct SegmentInstanceData {
    /// Texture bindings for this segment.
    textures: BatchTextures,
    /// Per-segment resource address forwarded to the shader
    /// (e.g. as `BrushInstance::resource_address`).
    specific_resource_address: i32,
}
492
/// Encapsulates the logic of building batches for items that are blended.
pub struct AlphaBatchBuilder {
    pub alpha_batch_list: AlphaBatchList,
    pub opaque_batch_list: OpaqueBatchList,
    /// The render task these batches will be drawn into.
    pub render_task_id: RenderTaskId,
    /// Address of that task, stamped into every instance emitted here.
    render_task_address: RenderTaskAddress,
    /// Primitives are only added to this builder when their visibility
    /// mask intersects this one.
    pub vis_mask: PrimitiveVisibilityMask,
}
501
502 impl AlphaBatchBuilder {
new( screen_size: DeviceIntSize, break_advanced_blend_batches: bool, lookback_count: usize, render_task_id: RenderTaskId, render_task_address: RenderTaskAddress, vis_mask: PrimitiveVisibilityMask, ) -> Self503 pub fn new(
504 screen_size: DeviceIntSize,
505 break_advanced_blend_batches: bool,
506 lookback_count: usize,
507 render_task_id: RenderTaskId,
508 render_task_address: RenderTaskAddress,
509 vis_mask: PrimitiveVisibilityMask,
510 ) -> Self {
511 // The threshold for creating a new batch is
512 // one quarter the screen size.
513 let batch_area_threshold = (screen_size.width * screen_size.height) as f32 / 4.0;
514
515 AlphaBatchBuilder {
516 alpha_batch_list: AlphaBatchList::new(break_advanced_blend_batches, lookback_count),
517 opaque_batch_list: OpaqueBatchList::new(batch_area_threshold, lookback_count),
518 render_task_id,
519 render_task_address,
520 vis_mask,
521 }
522 }
523
524 /// Clear all current batches in this builder. This is typically used
525 /// when a primitive is encountered that occludes all previous
526 /// content in this batch list.
clear(&mut self)527 fn clear(&mut self) {
528 self.alpha_batch_list.clear();
529 self.opaque_batch_list.clear();
530 }
531
build( mut self, batch_containers: &mut Vec<AlphaBatchContainer>, merged_batches: &mut AlphaBatchContainer, task_rect: DeviceIntRect, task_scissor_rect: Option<DeviceIntRect>, )532 pub fn build(
533 mut self,
534 batch_containers: &mut Vec<AlphaBatchContainer>,
535 merged_batches: &mut AlphaBatchContainer,
536 task_rect: DeviceIntRect,
537 task_scissor_rect: Option<DeviceIntRect>,
538 ) {
539 self.opaque_batch_list.finalize();
540
541 if task_scissor_rect.is_none() {
542 merged_batches.merge(self, &task_rect);
543 } else {
544 batch_containers.push(AlphaBatchContainer {
545 alpha_batches: self.alpha_batch_list.batches,
546 opaque_batches: self.opaque_batch_list.batches,
547 task_scissor_rect,
548 task_rect,
549 });
550 }
551 }
552
push_single_instance( &mut self, key: BatchKey, features: BatchFeatures, bounding_rect: &PictureRect, z_id: ZBufferId, instance: PrimitiveInstanceData, )553 pub fn push_single_instance(
554 &mut self,
555 key: BatchKey,
556 features: BatchFeatures,
557 bounding_rect: &PictureRect,
558 z_id: ZBufferId,
559 instance: PrimitiveInstanceData,
560 ) {
561 self.set_params_and_get_batch(key, features, bounding_rect, z_id)
562 .push(instance);
563 }
564
set_params_and_get_batch( &mut self, key: BatchKey, features: BatchFeatures, bounding_rect: &PictureRect, z_id: ZBufferId, ) -> &mut Vec<PrimitiveInstanceData>565 pub fn set_params_and_get_batch(
566 &mut self,
567 key: BatchKey,
568 features: BatchFeatures,
569 bounding_rect: &PictureRect,
570 z_id: ZBufferId,
571 ) -> &mut Vec<PrimitiveInstanceData> {
572 match key.blend_mode {
573 BlendMode::None => {
574 self.opaque_batch_list
575 .set_params_and_get_batch(key, features, bounding_rect)
576 }
577 BlendMode::Alpha |
578 BlendMode::PremultipliedAlpha |
579 BlendMode::PremultipliedDestOut |
580 BlendMode::SubpixelConstantTextColor(..) |
581 BlendMode::SubpixelWithBgColor |
582 BlendMode::SubpixelDualSource |
583 BlendMode::Advanced(_) => {
584 self.alpha_batch_list
585 .set_params_and_get_batch(key, features, bounding_rect, z_id)
586 }
587 }
588 }
589 }
590
/// Supports (recursively) adding a list of primitives and pictures to an alpha batch
/// builder. In future, it will support multiple dirty regions / slices, allowing the
/// contents of a picture to be spliced into multiple batch builders.
pub struct BatchBuilder {
    /// A temporary buffer that is used during glyph fetching, stored here
    /// to reduce memory allocations.
    glyph_fetch_buffer: Vec<GlyphFetchResult>,

    /// One builder per render task; instances are fanned out to every
    /// builder whose visibility mask matches the primitive.
    pub batchers: Vec<AlphaBatchBuilder>,
}
601
602 impl BatchBuilder {
new(batchers: Vec<AlphaBatchBuilder>) -> Self603 pub fn new(batchers: Vec<AlphaBatchBuilder>) -> Self {
604 BatchBuilder {
605 glyph_fetch_buffer: Vec::new(),
606 batchers,
607 }
608 }
609
finalize(self) -> Vec<AlphaBatchBuilder>610 pub fn finalize(self) -> Vec<AlphaBatchBuilder> {
611 self.batchers
612 }
613
add_brush_instance_to_batches( &mut self, batch_key: BatchKey, features: BatchFeatures, bounding_rect: &PictureRect, z_id: ZBufferId, segment_index: i32, edge_flags: EdgeAaSegmentMask, clip_task_address: RenderTaskAddress, brush_flags: BrushFlags, prim_header_index: PrimitiveHeaderIndex, resource_address: i32, prim_vis_mask: PrimitiveVisibilityMask, )614 fn add_brush_instance_to_batches(
615 &mut self,
616 batch_key: BatchKey,
617 features: BatchFeatures,
618 bounding_rect: &PictureRect,
619 z_id: ZBufferId,
620 segment_index: i32,
621 edge_flags: EdgeAaSegmentMask,
622 clip_task_address: RenderTaskAddress,
623 brush_flags: BrushFlags,
624 prim_header_index: PrimitiveHeaderIndex,
625 resource_address: i32,
626 prim_vis_mask: PrimitiveVisibilityMask,
627 ) {
628 for batcher in &mut self.batchers {
629 if batcher.vis_mask.intersects(prim_vis_mask) {
630 let render_task_address = batcher.render_task_address;
631
632 let instance = BrushInstance {
633 segment_index,
634 edge_flags,
635 clip_task_address,
636 render_task_address,
637 brush_flags,
638 prim_header_index,
639 resource_address,
640 brush_kind: batch_key.kind.shader_kind(),
641 };
642
643 batcher.push_single_instance(
644 batch_key,
645 features,
646 bounding_rect,
647 z_id,
648 PrimitiveInstanceData::from(instance),
649 );
650 }
651 }
652 }
653
add_split_composite_instance_to_batches( &mut self, batch_key: BatchKey, bounding_rect: &PictureRect, z_id: ZBufferId, prim_header_index: PrimitiveHeaderIndex, polygons_address: GpuCacheAddress, prim_vis_mask: PrimitiveVisibilityMask, )654 fn add_split_composite_instance_to_batches(
655 &mut self,
656 batch_key: BatchKey,
657 bounding_rect: &PictureRect,
658 z_id: ZBufferId,
659 prim_header_index: PrimitiveHeaderIndex,
660 polygons_address: GpuCacheAddress,
661 prim_vis_mask: PrimitiveVisibilityMask,
662 ) {
663 for batcher in &mut self.batchers {
664 if batcher.vis_mask.intersects(prim_vis_mask) {
665 let render_task_address = batcher.render_task_address;
666
667 batcher.push_single_instance(
668 batch_key,
669 BatchFeatures::empty(),
670 bounding_rect,
671 z_id,
672 PrimitiveInstanceData::from(SplitCompositeInstance {
673 prim_header_index,
674 render_task_address,
675 polygons_address,
676 z: z_id,
677 }),
678 );
679 }
680 }
681 }
682
683 /// Clear all current batchers. This is typically used when a primitive
684 /// is encountered that occludes all previous content in this batch list.
clear_batches(&mut self)685 fn clear_batches(&mut self) {
686 for batcher in &mut self.batchers {
687 batcher.clear();
688 }
689 }
690
    /// Add a picture to a given batch builder.
    ///
    /// Walks every cluster of the picture's primitive list and forwards
    /// each primitive instance to `add_prim_to_batch`, along with the
    /// cluster's spatial node and the shared batching context.
    pub fn add_pic_to_batch(
        &mut self,
        pic: &PicturePrimitive,
        ctx: &RenderTargetContext,
        gpu_cache: &mut GpuCache,
        render_tasks: &RenderTaskGraph,
        deferred_resolves: &mut Vec<DeferredResolve>,
        prim_headers: &mut PrimitiveHeaders,
        transforms: &mut TransformPalette,
        root_spatial_node_index: SpatialNodeIndex,
        surface_spatial_node_index: SpatialNodeIndex,
        z_generator: &mut ZBufferIdGenerator,
        composite_state: &mut CompositeState,
    ) {
        for cluster in &pic.prim_list.clusters {
            profile_scope!("cluster");
            // Add each run in this picture to the batch.
            for prim_instance in &cluster.prim_instances {
                self.add_prim_to_batch(
                    prim_instance,
                    cluster.spatial_node_index,
                    ctx,
                    gpu_cache,
                    render_tasks,
                    deferred_resolves,
                    prim_headers,
                    transforms,
                    root_spatial_node_index,
                    surface_spatial_node_index,
                    z_generator,
                    composite_state,
                );
            }
        }
    }
727
    // If an image is being drawn as a compositor surface, we don't want
    // to draw the surface itself into the tile. Instead, we draw a transparent
    // rectangle that writes to the z-buffer where this compositor surface is.
    // That ensures we 'cut out' the part of the tile that has the compositor
    // surface on it, allowing us to draw this tile as an overlay on top of
    // the compositor surface.
    // TODO(gw): There's a slight performance cost to doing this cutout rectangle
    //           if we end up not needing to use overlay mode. Consider skipping
    //           the cutout completely in this path.
    fn emit_placeholder(
        &mut self,
        prim_rect: LayoutRect,
        prim_info: &PrimitiveVisibility,
        z_id: ZBufferId,
        transform_id: TransformPaletteId,
        batch_features: BatchFeatures,
        ctx: &RenderTargetContext,
        gpu_cache: &mut GpuCache,
        render_tasks: &RenderTaskGraph,
        prim_headers: &mut PrimitiveHeaders,
    ) {
        // A solid brush with zero shader opacity: invisible in color, but
        // still written to the z-buffer.
        let batch_params = BrushBatchParameters::shared(
            BrushBatchKind::Solid,
            BatchTextures::no_texture(),
            [get_shader_opacity(0.0), 0, 0, 0],
            0,
        );

        let prim_cache_address = gpu_cache.get_address(
            &ctx.globals.default_transparent_rect_handle,
        );

        let prim_header = PrimitiveHeader {
            local_rect: prim_rect,
            local_clip_rect: prim_info.combined_local_clip_rect,
            specific_prim_address: prim_cache_address,
            transform_id,
        };

        let prim_header_index = prim_headers.push(
            &prim_header,
            z_id,
            batch_params.prim_user_data,
        );

        let bounding_rect = &prim_info.clip_chain.pic_clip_rect;
        let transform_kind = transform_id.transform_kind();
        let prim_vis_mask = prim_info.visibility_mask;

        self.add_segmented_prim_to_batch(
            None,
            PrimitiveOpacity::translucent(),
            &batch_params,
            BlendMode::None,
            BlendMode::None,
            batch_features,
            prim_header_index,
            bounding_rect,
            transform_kind,
            render_tasks,
            z_id,
            prim_info.clip_task_index,
            prim_vis_mask,
            ctx,
        );
    }
794
795 // Adds a primitive to a batch.
796 // It can recursively call itself in some situations, for
797 // example if it encounters a picture where the items
798 // in that picture are being drawn into the same target.
add_prim_to_batch( &mut self, prim_instance: &PrimitiveInstance, prim_spatial_node_index: SpatialNodeIndex, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskGraph, deferred_resolves: &mut Vec<DeferredResolve>, prim_headers: &mut PrimitiveHeaders, transforms: &mut TransformPalette, root_spatial_node_index: SpatialNodeIndex, surface_spatial_node_index: SpatialNodeIndex, z_generator: &mut ZBufferIdGenerator, composite_state: &mut CompositeState, )799 fn add_prim_to_batch(
800 &mut self,
801 prim_instance: &PrimitiveInstance,
802 prim_spatial_node_index: SpatialNodeIndex,
803 ctx: &RenderTargetContext,
804 gpu_cache: &mut GpuCache,
805 render_tasks: &RenderTaskGraph,
806 deferred_resolves: &mut Vec<DeferredResolve>,
807 prim_headers: &mut PrimitiveHeaders,
808 transforms: &mut TransformPalette,
809 root_spatial_node_index: SpatialNodeIndex,
810 surface_spatial_node_index: SpatialNodeIndex,
811 z_generator: &mut ZBufferIdGenerator,
812 composite_state: &mut CompositeState,
813 ) {
814 if prim_instance.visibility_info == PrimitiveVisibilityIndex::INVALID {
815 return;
816 }
817
818 #[cfg(debug_assertions)] //TODO: why is this needed?
819 debug_assert_eq!(prim_instance.prepared_frame_id, render_tasks.frame_id());
820
821 let is_chased = prim_instance.is_chased();
822
823 let transform_id = transforms
824 .get_id(
825 prim_spatial_node_index,
826 root_spatial_node_index,
827 ctx.spatial_tree,
828 );
829
830 // TODO(gw): Calculating this for every primitive is a bit
831 // wasteful. We should probably cache this in
832 // the scroll node...
833 let transform_kind = transform_id.transform_kind();
834 let prim_info = &ctx.scratch.prim_info[prim_instance.visibility_info.0 as usize];
835 let bounding_rect = &prim_info.clip_chain.pic_clip_rect;
836
837 // If this primitive is a backdrop, that means that it is known to cover
838 // the entire picture cache background. In that case, the renderer will
839 // use the backdrop color as a clear color, and so we can drop this
840 // primitive and any prior primitives from the batch lists for this
841 // picture cache slice.
842 if prim_info.flags.contains(PrimitiveVisibilityFlags::IS_BACKDROP) {
843 self.clear_batches();
844 return;
845 }
846
847 let z_id = z_generator.next();
848
849 let prim_rect = ctx.data_stores.get_local_prim_rect(
850 prim_instance,
851 ctx.prim_store,
852 );
853
854 let mut batch_features = BatchFeatures::empty();
855 if ctx.data_stores.prim_may_need_repetition(prim_instance) {
856 batch_features |= BatchFeatures::REPETITION;
857 }
858
859 if transform_kind != TransformedRectKind::AxisAligned {
860 batch_features |= BatchFeatures::ANTIALIASING;
861 }
862
863 let prim_vis_mask = prim_info.visibility_mask;
864 let clip_task_address = ctx.get_prim_clip_task_address(
865 prim_info.clip_task_index,
866 render_tasks,
867 );
868
869 if is_chased {
870 println!("\tbatch {:?} with bound {:?} and clip task {:?}", prim_rect, bounding_rect, clip_task_address);
871 }
872
873 if !bounding_rect.is_empty() {
874 debug_assert_eq!(prim_info.clip_chain.pic_spatial_node_index, surface_spatial_node_index,
875 "The primitive's bounding box is specified in a different coordinate system from the current batch!");
876 }
877
878 match prim_instance.kind {
879 PrimitiveInstanceKind::Clear { data_handle } => {
880 let prim_data = &ctx.data_stores.prim[data_handle];
881 let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
882
883 // TODO(gw): We can abstract some of the common code below into
884 // helper methods, as we port more primitives to make
885 // use of interning.
886
887 let prim_header = PrimitiveHeader {
888 local_rect: prim_rect,
889 local_clip_rect: prim_info.combined_local_clip_rect,
890 specific_prim_address: prim_cache_address,
891 transform_id,
892 };
893
894 let prim_header_index = prim_headers.push(
895 &prim_header,
896 z_id,
897 [get_shader_opacity(1.0), 0, 0, 0],
898 );
899
900 let batch_key = BatchKey {
901 blend_mode: BlendMode::PremultipliedDestOut,
902 kind: BatchKind::Brush(BrushBatchKind::Solid),
903 textures: BatchTextures::no_texture(),
904 };
905
906 self.add_brush_instance_to_batches(
907 batch_key,
908 batch_features,
909 bounding_rect,
910 z_id,
911 INVALID_SEGMENT_INDEX,
912 EdgeAaSegmentMask::all(),
913 clip_task_address.unwrap(),
914 BrushFlags::PERSPECTIVE_INTERPOLATION,
915 prim_header_index,
916 0,
917 prim_vis_mask,
918 );
919 }
920 PrimitiveInstanceKind::NormalBorder { data_handle, ref cache_handles, .. } => {
921 let prim_data = &ctx.data_stores.normal_border[data_handle];
922 let common_data = &prim_data.common;
923 let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
924 let cache_handles = &ctx.scratch.border_cache_handles[*cache_handles];
925 let specified_blend_mode = BlendMode::PremultipliedAlpha;
926 let mut segment_data: SmallVec<[SegmentInstanceData; 8]> = SmallVec::new();
927
928 // Collect the segment instance data from each render
929 // task for each valid edge / corner of the border.
930
931 for handle in cache_handles {
932 let rt_cache_entry = ctx.resource_cache
933 .get_cached_render_task(handle);
934 let cache_item = ctx.resource_cache
935 .get_texture_cache_item(&rt_cache_entry.handle);
936 segment_data.push(
937 SegmentInstanceData {
938 textures: BatchTextures::color(cache_item.texture_id),
939 specific_resource_address: cache_item.uv_rect_handle.as_int(gpu_cache),
940 }
941 );
942 }
943
944 let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
945 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
946 transform_kind == TransformedRectKind::Complex
947 {
948 specified_blend_mode
949 } else {
950 BlendMode::None
951 };
952
953 let prim_header = PrimitiveHeader {
954 local_rect: prim_rect,
955 local_clip_rect: prim_info.combined_local_clip_rect,
956 specific_prim_address: prim_cache_address,
957 transform_id,
958 };
959
960 let batch_params = BrushBatchParameters::instanced(
961 BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
962 ImageBrushData {
963 color_mode: ShaderColorMode::Image,
964 alpha_type: AlphaType::PremultipliedAlpha,
965 raster_space: RasterizationSpace::Local,
966 opacity: 1.0,
967 }.encode(),
968 segment_data,
969 );
970
971 let prim_header_index = prim_headers.push(
972 &prim_header,
973 z_id,
974 batch_params.prim_user_data,
975 );
976
977 let border_data = &prim_data.kind;
978 self.add_segmented_prim_to_batch(
979 Some(border_data.brush_segments.as_slice()),
980 common_data.opacity,
981 &batch_params,
982 specified_blend_mode,
983 non_segmented_blend_mode,
984 batch_features,
985 prim_header_index,
986 bounding_rect,
987 transform_kind,
988 render_tasks,
989 z_id,
990 prim_info.clip_task_index,
991 prim_vis_mask,
992 ctx,
993 );
994 }
995 PrimitiveInstanceKind::TextRun { data_handle, run_index, .. } => {
996 let run = &ctx.prim_store.text_runs[run_index];
997 let subpx_dir = run.used_font.get_subpx_dir();
998
999 // The GPU cache data is stored in the template and reused across
1000 // frames and display lists.
1001 let prim_data = &ctx.data_stores.text_run[data_handle];
1002 let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
1003
1004 // The local prim rect is only informative for text primitives, and
1005 // thus is not directly necessary for any drawing of the text run.
1006 // However the glyph offsets are relative to the prim rect origin
1007 // less the unsnapped reference frame offset. We also want
1008 // the snapped reference frame offset, because we cannot recalculate
1009 // it as it ignores the animated components for the transform. As
1010 // such, we adjust the prim rect origin here, and replace the size
1011 // with the unsnapped and snapped offsets respectively. This has
1012 // the added bonus of avoiding quantization effects when storing
1013 // floats in the extra header integers.
1014 let prim_header = PrimitiveHeader {
1015 local_rect: LayoutRect::new(
1016 prim_rect.origin - run.reference_frame_relative_offset,
1017 run.snapped_reference_frame_relative_offset.to_size(),
1018 ),
1019 local_clip_rect: prim_info.combined_local_clip_rect,
1020 specific_prim_address: prim_cache_address,
1021 transform_id,
1022 };
1023
1024 let glyph_keys = &ctx.scratch.glyph_keys[run.glyph_keys_range];
1025 let raster_scale = run.raster_space.local_scale().unwrap_or(1.0).max(0.001);
1026 let prim_header_index = prim_headers.push(
1027 &prim_header,
1028 z_id,
1029 [
1030 (raster_scale * 65535.0).round() as i32,
1031 0,
1032 0,
1033 0,
1034 ],
1035 );
1036 let base_instance = GlyphInstance::new(
1037 prim_header_index,
1038 );
1039 let batchers = &mut self.batchers;
1040
1041 ctx.resource_cache.fetch_glyphs(
1042 run.used_font.clone(),
1043 &glyph_keys,
1044 &mut self.glyph_fetch_buffer,
1045 gpu_cache,
1046 |texture_id, mut glyph_format, glyphs| {
1047 debug_assert_ne!(texture_id, TextureSource::Invalid);
1048
1049 // Ignore color and only sample alpha when shadowing.
1050 if run.shadow {
1051 glyph_format = glyph_format.ignore_color();
1052 }
1053
1054 let subpx_dir = subpx_dir.limit_by(glyph_format);
1055
1056 let textures = BatchTextures {
1057 colors: [
1058 texture_id,
1059 TextureSource::Invalid,
1060 TextureSource::Invalid,
1061 ],
1062 };
1063
1064 let kind = BatchKind::TextRun(glyph_format);
1065
1066 let (blend_mode, color_mode) = match glyph_format {
1067 GlyphFormat::Subpixel |
1068 GlyphFormat::TransformedSubpixel => {
1069 if run.used_font.bg_color.a != 0 {
1070 (
1071 BlendMode::SubpixelWithBgColor,
1072 ShaderColorMode::FromRenderPassMode,
1073 )
1074 } else if ctx.use_dual_source_blending {
1075 (
1076 BlendMode::SubpixelDualSource,
1077 ShaderColorMode::SubpixelDualSource,
1078 )
1079 } else {
1080 (
1081 BlendMode::SubpixelConstantTextColor(run.used_font.color.into()),
1082 ShaderColorMode::SubpixelConstantTextColor,
1083 )
1084 }
1085 }
1086 GlyphFormat::Alpha |
1087 GlyphFormat::TransformedAlpha => {
1088 (
1089 BlendMode::PremultipliedAlpha,
1090 ShaderColorMode::Alpha,
1091 )
1092 }
1093 GlyphFormat::Bitmap => {
1094 (
1095 BlendMode::PremultipliedAlpha,
1096 ShaderColorMode::Bitmap,
1097 )
1098 }
1099 GlyphFormat::ColorBitmap => {
1100 (
1101 BlendMode::PremultipliedAlpha,
1102 ShaderColorMode::ColorBitmap,
1103 )
1104 }
1105 };
1106
1107 let key = BatchKey::new(kind, blend_mode, textures);
1108
1109 for batcher in batchers.iter_mut() {
1110 if batcher.vis_mask.intersects(prim_vis_mask) {
1111 let render_task_address = batcher.render_task_address;
1112 let batch = batcher.alpha_batch_list.set_params_and_get_batch(
1113 key,
1114 BatchFeatures::empty(),
1115 bounding_rect,
1116 z_id,
1117 );
1118
1119 for glyph in glyphs {
1120 batch.push(base_instance.build(
1121 render_task_address,
1122 clip_task_address.unwrap(),
1123 subpx_dir,
1124 glyph.index_in_text_run,
1125 glyph.uv_rect_address,
1126 color_mode,
1127 ));
1128 }
1129 }
1130 }
1131 },
1132 );
1133 }
1134 PrimitiveInstanceKind::LineDecoration { data_handle, ref cache_handle, .. } => {
1135 // The GPU cache data is stored in the template and reused across
1136 // frames and display lists.
1137 let common_data = &ctx.data_stores.line_decoration[data_handle].common;
1138 let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
1139
1140 let (batch_kind, textures, prim_user_data, specific_resource_address) = match cache_handle {
1141 Some(cache_handle) => {
1142 let rt_cache_entry = ctx
1143 .resource_cache
1144 .get_cached_render_task(cache_handle);
1145 let cache_item = ctx
1146 .resource_cache
1147 .get_texture_cache_item(&rt_cache_entry.handle);
1148 let textures = BatchTextures::color(cache_item.texture_id);
1149 (
1150 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
1151 textures,
1152 ImageBrushData {
1153 color_mode: ShaderColorMode::Image,
1154 alpha_type: AlphaType::PremultipliedAlpha,
1155 raster_space: RasterizationSpace::Local,
1156 opacity: 1.0,
1157 }.encode(),
1158 cache_item.uv_rect_handle.as_int(gpu_cache),
1159 )
1160 }
1161 None => {
1162 (
1163 BrushBatchKind::Solid,
1164 BatchTextures::no_texture(),
1165 [get_shader_opacity(1.0), 0, 0, 0],
1166 0,
1167 )
1168 }
1169 };
1170
1171 // TODO(gw): We can abstract some of the common code below into
1172 // helper methods, as we port more primitives to make
1173 // use of interning.
1174 let blend_mode = if !common_data.opacity.is_opaque ||
1175 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
1176 transform_kind == TransformedRectKind::Complex
1177 {
1178 BlendMode::PremultipliedAlpha
1179 } else {
1180 BlendMode::None
1181 };
1182
1183 let prim_header = PrimitiveHeader {
1184 local_rect: prim_rect,
1185 local_clip_rect: prim_info.combined_local_clip_rect,
1186 specific_prim_address: prim_cache_address,
1187 transform_id,
1188 };
1189
1190 let prim_header_index = prim_headers.push(
1191 &prim_header,
1192 z_id,
1193 prim_user_data,
1194 );
1195
1196 let batch_key = BatchKey {
1197 blend_mode,
1198 kind: BatchKind::Brush(batch_kind),
1199 textures,
1200 };
1201
1202 self.add_brush_instance_to_batches(
1203 batch_key,
1204 batch_features,
1205 bounding_rect,
1206 z_id,
1207 INVALID_SEGMENT_INDEX,
1208 EdgeAaSegmentMask::all(),
1209 clip_task_address.unwrap(),
1210 BrushFlags::PERSPECTIVE_INTERPOLATION,
1211 prim_header_index,
1212 specific_resource_address,
1213 prim_vis_mask,
1214 );
1215 }
1216 PrimitiveInstanceKind::Picture { pic_index, segment_instance_index, .. } => {
1217 let picture = &ctx.prim_store.pictures[pic_index.0];
1218 let non_segmented_blend_mode = BlendMode::PremultipliedAlpha;
1219 let prim_cache_address = gpu_cache.get_address(&ctx.globals.default_image_handle);
1220
1221 let prim_header = PrimitiveHeader {
1222 local_rect: picture.precise_local_rect,
1223 local_clip_rect: prim_info.combined_local_clip_rect,
1224 specific_prim_address: prim_cache_address,
1225 transform_id,
1226 };
1227
1228 match picture.context_3d {
1229 // Convert all children of the 3D hierarchy root into batches.
1230 Picture3DContext::In { root_data: Some(ref list), .. } => {
1231 for child in list {
1232 let cluster = &picture.prim_list.clusters[child.anchor.cluster_index];
1233 let child_prim_instance = &cluster.prim_instances[child.anchor.instance_index];
1234 let child_prim_info = &ctx.scratch.prim_info[child_prim_instance.visibility_info.0 as usize];
1235
1236 let child_pic_index = match child_prim_instance.kind {
1237 PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
1238 _ => unreachable!(),
1239 };
1240 let pic = &ctx.prim_store.pictures[child_pic_index.0];
1241
1242 // Get clip task, if set, for the picture primitive.
1243 let child_clip_task_address = ctx.get_prim_clip_task_address(
1244 child_prim_info.clip_task_index,
1245 render_tasks,
1246 );
1247
1248 let prim_header = PrimitiveHeader {
1249 local_rect: pic.precise_local_rect,
1250 local_clip_rect: child_prim_info.combined_local_clip_rect,
1251 specific_prim_address: GpuCacheAddress::INVALID,
1252 transform_id: transforms
1253 .get_id(
1254 child.spatial_node_index,
1255 root_spatial_node_index,
1256 ctx.spatial_tree,
1257 ),
1258 };
1259
1260 let raster_config = pic
1261 .raster_config
1262 .as_ref()
1263 .expect("BUG: 3d primitive was not assigned a surface");
1264 let (uv_rect_address, _) = render_tasks.resolve_surface(
1265 ctx.surfaces[raster_config.surface_index.0]
1266 .render_tasks
1267 .expect("BUG: no surface")
1268 .root,
1269 gpu_cache,
1270 );
1271
1272 // Need a new z-id for each child preserve-3d context added
1273 // by this inner loop.
1274 let z_id = z_generator.next();
1275
1276 let prim_header_index = prim_headers.push(&prim_header, z_id, [
1277 uv_rect_address.as_int(),
1278 if raster_config.establishes_raster_root { 1 } else { 0 },
1279 0,
1280 child_clip_task_address.unwrap().0 as i32,
1281 ]);
1282
1283 let key = BatchKey::new(
1284 BatchKind::SplitComposite,
1285 BlendMode::PremultipliedAlpha,
1286 BatchTextures::no_texture(),
1287 );
1288
1289 self.add_split_composite_instance_to_batches(
1290 key,
1291 &child_prim_info.clip_chain.pic_clip_rect,
1292 z_id,
1293 prim_header_index,
1294 child.gpu_address,
1295 child_prim_info.visibility_mask,
1296 );
1297 }
1298 }
1299 // Ignore the 3D pictures that are not in the root of the preserve-3D
1300 // hierarchy, since we process them with the root.
1301 Picture3DContext::In { root_data: None, .. } => return,
1302 // Proceed for non-3D pictures.
1303 Picture3DContext::Out => ()
1304 }
1305
1306 match picture.raster_config {
1307 Some(ref raster_config) => {
1308 // If the child picture was rendered in local space, we can safely
1309 // interpolate the UV coordinates with perspective correction.
1310 let brush_flags = if raster_config.establishes_raster_root {
1311 BrushFlags::PERSPECTIVE_INTERPOLATION
1312 } else {
1313 BrushFlags::empty()
1314 };
1315
1316 let surface = &ctx.surfaces[raster_config.surface_index.0];
1317 let surface_task = surface.render_tasks.map(|s| s.root);
1318
1319 match raster_config.composite_mode {
1320 PictureCompositeMode::TileCache { .. } => {
1321 // Tile cache instances are added to the composite config, rather than
1322 // directly added to batches. This allows them to be drawn with various
1323 // present modes during render, such as partial present etc.
1324 let tile_cache = picture.tile_cache.as_ref().unwrap();
1325 let map_local_to_world = SpaceMapper::new_with_target(
1326 ROOT_SPATIAL_NODE_INDEX,
1327 tile_cache.spatial_node_index,
1328 ctx.screen_world_rect,
1329 ctx.spatial_tree,
1330 );
1331 // TODO(gw): As a follow up to the valid_rect work, see why we use
1332 // prim_info.combined_local_clip_rect here instead of the
1333 // local_clip_rect built in the TileCacheInstance. Perhaps
1334 // these can be unified or are different for a good reason?
1335 let world_clip_rect = map_local_to_world
1336 .map(&prim_info.combined_local_clip_rect)
1337 .expect("bug: unable to map clip rect");
1338 let device_clip_rect = (world_clip_rect * ctx.global_device_pixel_scale).round();
1339
1340 composite_state.push_surface(
1341 tile_cache,
1342 device_clip_rect,
1343 ctx.global_device_pixel_scale,
1344 ctx.resource_cache,
1345 gpu_cache,
1346 deferred_resolves,
1347 );
1348 }
1349 PictureCompositeMode::Filter(ref filter) => {
1350 assert!(filter.is_visible());
1351 match filter {
1352 Filter::Blur(..) => {
1353 let kind = BatchKind::Brush(
1354 BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
1355 );
1356 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1357 surface_task.expect("bug: surface must be allocated by now"),
1358 gpu_cache,
1359 );
1360 let key = BatchKey::new(
1361 kind,
1362 non_segmented_blend_mode,
1363 textures,
1364 );
1365 let prim_header_index = prim_headers.push(
1366 &prim_header,
1367 z_id,
1368 ImageBrushData {
1369 color_mode: ShaderColorMode::Image,
1370 alpha_type: AlphaType::PremultipliedAlpha,
1371 raster_space: RasterizationSpace::Screen,
1372 opacity: 1.0,
1373 }.encode(),
1374 );
1375
1376 self.add_brush_instance_to_batches(
1377 key,
1378 batch_features,
1379 bounding_rect,
1380 z_id,
1381 INVALID_SEGMENT_INDEX,
1382 EdgeAaSegmentMask::empty(),
1383 clip_task_address.unwrap(),
1384 brush_flags,
1385 prim_header_index,
1386 uv_rect_address.as_int(),
1387 prim_vis_mask,
1388 );
1389 }
1390 Filter::DropShadows(shadows) => {
1391 // Draw an instance per shadow first, followed by the content.
1392
1393 // The shadows and the content get drawn as a brush image.
1394 let kind = BatchKind::Brush(
1395 BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1396 );
1397
1398 // Gets the saved render task ID of the content, which is
1399 // deeper in the render task graph than the direct child.
1400 let secondary_id = picture.secondary_render_task_id.expect("no secondary!?");
1401 let content_source = {
1402 let secondary_task = &render_tasks[secondary_id];
1403 let saved_index = secondary_task.saved_index.expect("no saved index!?");
1404 debug_assert_ne!(saved_index, SavedTargetIndex::PENDING);
1405 TextureSource::RenderTaskCache(saved_index, Swizzle::default())
1406 };
1407
1408 // Build BatchTextures for shadow/content
1409 let shadow_textures = BatchTextures::render_target_cache();
1410 let content_textures = BatchTextures {
1411 colors: [
1412 content_source,
1413 TextureSource::Invalid,
1414 TextureSource::Invalid,
1415 ],
1416 };
1417
1418 // Build batch keys for shadow/content
1419 let shadow_key = BatchKey::new(kind, non_segmented_blend_mode, shadow_textures);
1420 let content_key = BatchKey::new(kind, non_segmented_blend_mode, content_textures);
1421
1422 // Retrieve the UV rect addresses for shadow/content.
1423 let cache_task_id = surface_task
1424 .expect("bug: surface must be allocated by now");
1425 let shadow_uv_rect_address = render_tasks[cache_task_id]
1426 .get_texture_address(gpu_cache)
1427 .as_int();
1428 let content_uv_rect_address = render_tasks[secondary_id]
1429 .get_texture_address(gpu_cache)
1430 .as_int();
1431
1432 for (shadow, shadow_gpu_data) in shadows.iter().zip(picture.extra_gpu_data_handles.iter()) {
1433 // Get the GPU cache address of the extra data handle.
1434 let shadow_prim_address = gpu_cache.get_address(shadow_gpu_data);
1435
1436 let shadow_rect = prim_header.local_rect.translate(shadow.offset);
1437
1438 let shadow_prim_header = PrimitiveHeader {
1439 local_rect: shadow_rect,
1440 specific_prim_address: shadow_prim_address,
1441 ..prim_header
1442 };
1443
1444 let shadow_prim_header_index = prim_headers.push(
1445 &shadow_prim_header,
1446 z_id,
1447 ImageBrushData {
1448 color_mode: ShaderColorMode::Alpha,
1449 alpha_type: AlphaType::PremultipliedAlpha,
1450 raster_space: RasterizationSpace::Screen,
1451 opacity: 1.0,
1452 }.encode(),
1453 );
1454
1455 self.add_brush_instance_to_batches(
1456 shadow_key,
1457 batch_features,
1458 bounding_rect,
1459 z_id,
1460 INVALID_SEGMENT_INDEX,
1461 EdgeAaSegmentMask::empty(),
1462 clip_task_address.unwrap(),
1463 brush_flags,
1464 shadow_prim_header_index,
1465 shadow_uv_rect_address,
1466 prim_vis_mask,
1467 );
1468 }
1469 let z_id_content = z_generator.next();
1470
1471 let content_prim_header_index = prim_headers.push(
1472 &prim_header,
1473 z_id_content,
1474 ImageBrushData {
1475 color_mode: ShaderColorMode::Image,
1476 alpha_type: AlphaType::PremultipliedAlpha,
1477 raster_space: RasterizationSpace::Screen,
1478 opacity: 1.0,
1479 }.encode(),
1480 );
1481
1482 self.add_brush_instance_to_batches(
1483 content_key,
1484 batch_features,
1485 bounding_rect,
1486 z_id_content,
1487 INVALID_SEGMENT_INDEX,
1488 EdgeAaSegmentMask::empty(),
1489 clip_task_address.unwrap(),
1490 brush_flags,
1491 content_prim_header_index,
1492 content_uv_rect_address,
1493 prim_vis_mask,
1494 );
1495 }
1496 Filter::Opacity(_, amount) => {
1497 let amount = (amount * 65536.0) as i32;
1498
1499 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1500 surface_task.expect("bug: surface must be allocated by now"),
1501 gpu_cache,
1502 );
1503
1504 let key = BatchKey::new(
1505 BatchKind::Brush(BrushBatchKind::Opacity),
1506 BlendMode::PremultipliedAlpha,
1507 textures,
1508 );
1509
1510 let prim_header_index = prim_headers.push(&prim_header, z_id, [
1511 uv_rect_address.as_int(),
1512 amount,
1513 0,
1514 0,
1515 ]);
1516
1517 self.add_brush_instance_to_batches(
1518 key,
1519 batch_features,
1520 bounding_rect,
1521 z_id,
1522 INVALID_SEGMENT_INDEX,
1523 EdgeAaSegmentMask::empty(),
1524 clip_task_address.unwrap(),
1525 brush_flags,
1526 prim_header_index,
1527 0,
1528 prim_vis_mask,
1529 );
1530 }
1531 _ => {
1532 // Must be kept in sync with brush_blend.glsl
1533 let filter_mode = filter.as_int();
1534
1535 let user_data = match filter {
1536 Filter::Identity => 0x10000i32, // matches `Contrast(1)`
1537 Filter::Contrast(amount) |
1538 Filter::Grayscale(amount) |
1539 Filter::Invert(amount) |
1540 Filter::Saturate(amount) |
1541 Filter::Sepia(amount) |
1542 Filter::Brightness(amount) => {
1543 (amount * 65536.0) as i32
1544 }
1545 Filter::SrgbToLinear | Filter::LinearToSrgb => 0,
1546 Filter::HueRotate(angle) => {
1547 (0.01745329251 * angle * 65536.0) as i32
1548 }
1549 Filter::ColorMatrix(_) => {
1550 picture.extra_gpu_data_handles[0].as_int(gpu_cache)
1551 }
1552 Filter::Flood(_) => {
1553 picture.extra_gpu_data_handles[0].as_int(gpu_cache)
1554 }
1555
1556 // These filters are handled via different paths.
1557 Filter::ComponentTransfer |
1558 Filter::Blur(..) |
1559 Filter::DropShadows(..) |
1560 Filter::Opacity(..) => unreachable!(),
1561 };
1562
1563 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1564 surface_task.expect("bug: surface must be allocated by now"),
1565 gpu_cache,
1566 );
1567
1568 let key = BatchKey::new(
1569 BatchKind::Brush(BrushBatchKind::Blend),
1570 BlendMode::PremultipliedAlpha,
1571 textures,
1572 );
1573
1574 let prim_header_index = prim_headers.push(&prim_header, z_id, [
1575 uv_rect_address.as_int(),
1576 filter_mode,
1577 user_data,
1578 0,
1579 ]);
1580
1581 self.add_brush_instance_to_batches(
1582 key,
1583 batch_features,
1584 bounding_rect,
1585 z_id,
1586 INVALID_SEGMENT_INDEX,
1587 EdgeAaSegmentMask::empty(),
1588 clip_task_address.unwrap(),
1589 brush_flags,
1590 prim_header_index,
1591 0,
1592 prim_vis_mask,
1593 );
1594 }
1595 }
1596 }
1597 PictureCompositeMode::ComponentTransferFilter(handle) => {
1598 // This is basically the same as the general filter case above
1599 // except we store a little more data in the filter mode and
1600 // a gpu cache handle in the user data.
1601 let filter_data = &ctx.data_stores.filter_data[handle];
1602 let filter_mode : i32 = Filter::ComponentTransfer.as_int() |
1603 ((filter_data.data.r_func.to_int() << 28 |
1604 filter_data.data.g_func.to_int() << 24 |
1605 filter_data.data.b_func.to_int() << 20 |
1606 filter_data.data.a_func.to_int() << 16) as i32);
1607
1608 let user_data = filter_data.gpu_cache_handle.as_int(gpu_cache);
1609
1610 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1611 surface_task.expect("bug: surface must be allocated by now"),
1612 gpu_cache,
1613 );
1614
1615 let key = BatchKey::new(
1616 BatchKind::Brush(BrushBatchKind::Blend),
1617 BlendMode::PremultipliedAlpha,
1618 textures,
1619 );
1620
1621 let prim_header_index = prim_headers.push(&prim_header, z_id, [
1622 uv_rect_address.as_int(),
1623 filter_mode,
1624 user_data,
1625 0,
1626 ]);
1627
1628 self.add_brush_instance_to_batches(
1629 key,
1630 batch_features,
1631 bounding_rect,
1632 z_id,
1633 INVALID_SEGMENT_INDEX,
1634 EdgeAaSegmentMask::empty(),
1635 clip_task_address.unwrap(),
1636 brush_flags,
1637 prim_header_index,
1638 0,
1639 prim_vis_mask,
1640 );
1641 }
1642 PictureCompositeMode::MixBlend(mode) if ctx.use_advanced_blending => {
1643 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1644 surface_task.expect("bug: surface must be allocated by now"),
1645 gpu_cache,
1646 );
1647 let key = BatchKey::new(
1648 BatchKind::Brush(
1649 BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1650 ),
1651 BlendMode::Advanced(mode),
1652 textures,
1653 );
1654 let prim_header_index = prim_headers.push(
1655 &prim_header,
1656 z_id,
1657 ImageBrushData {
1658 color_mode: ShaderColorMode::Image,
1659 alpha_type: AlphaType::PremultipliedAlpha,
1660 raster_space: RasterizationSpace::Local,
1661 opacity: 1.0,
1662 }.encode(),
1663 );
1664
1665 self.add_brush_instance_to_batches(
1666 key,
1667 batch_features,
1668 bounding_rect,
1669 z_id,
1670 INVALID_SEGMENT_INDEX,
1671 EdgeAaSegmentMask::empty(),
1672 clip_task_address.unwrap(),
1673 brush_flags,
1674 prim_header_index,
1675 uv_rect_address.as_int(),
1676 prim_vis_mask,
1677 );
1678 }
1679 PictureCompositeMode::MixBlend(mode) => {
1680 let cache_task_id = surface_task.expect("bug: surface must be allocated by now");
1681 let backdrop_id = picture.secondary_render_task_id.expect("no backdrop!?");
1682
1683 // TODO(gw): For now, mix-blend is not supported as a picture
1684 // caching root, so we can safely assume there is
1685 // only a single batcher present.
1686 assert_eq!(self.batchers.len(), 1);
1687
1688 let key = BatchKey::new(
1689 BatchKind::Brush(
1690 BrushBatchKind::MixBlend {
1691 task_id: self.batchers[0].render_task_id,
1692 source_id: cache_task_id,
1693 backdrop_id,
1694 },
1695 ),
1696 BlendMode::PremultipliedAlpha,
1697 BatchTextures::no_texture(),
1698 );
1699 let backdrop_task_address = render_tasks.get_task_address(backdrop_id);
1700 let source_task_address = render_tasks.get_task_address(cache_task_id);
1701 let prim_header_index = prim_headers.push(&prim_header, z_id, [
1702 mode as u32 as i32,
1703 backdrop_task_address.0 as i32,
1704 source_task_address.0 as i32,
1705 0,
1706 ]);
1707
1708 self.add_brush_instance_to_batches(
1709 key,
1710 batch_features,
1711 bounding_rect,
1712 z_id,
1713 INVALID_SEGMENT_INDEX,
1714 EdgeAaSegmentMask::empty(),
1715 clip_task_address.unwrap(),
1716 brush_flags,
1717 prim_header_index,
1718 0,
1719 prim_vis_mask,
1720 );
1721 }
1722 PictureCompositeMode::Blit(_) => {
1723 let cache_task_id = surface_task.expect("bug: surface must be allocated by now");
1724 let uv_rect_address = render_tasks[cache_task_id]
1725 .get_texture_address(gpu_cache)
1726 .as_int();
1727 let textures = match render_tasks[cache_task_id].saved_index {
1728 Some(saved_index) => BatchTextures {
1729 colors: [
1730 TextureSource::RenderTaskCache(saved_index, Swizzle::default()),
1731 TextureSource::PrevPassAlpha,
1732 TextureSource::Invalid,
1733 ]
1734 },
1735 None => BatchTextures::render_target_cache(),
1736 };
1737 let batch_params = BrushBatchParameters::shared(
1738 BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
1739 textures,
1740 ImageBrushData {
1741 color_mode: ShaderColorMode::Image,
1742 alpha_type: AlphaType::PremultipliedAlpha,
1743 raster_space: RasterizationSpace::Screen,
1744 opacity: 1.0,
1745 }.encode(),
1746 uv_rect_address,
1747 );
1748
1749 let is_segmented =
1750 segment_instance_index != SegmentInstanceIndex::INVALID &&
1751 segment_instance_index != SegmentInstanceIndex::UNUSED;
1752
1753 let (prim_cache_address, segments) = if is_segmented {
1754 let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
1755 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
1756 (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
1757 } else {
1758 (prim_cache_address, None)
1759 };
1760
1761 let prim_header = PrimitiveHeader {
1762 local_rect: picture.precise_local_rect,
1763 local_clip_rect: prim_info.combined_local_clip_rect,
1764 specific_prim_address: prim_cache_address,
1765 transform_id,
1766 };
1767
1768 let prim_header_index = prim_headers.push(
1769 &prim_header,
1770 z_id,
1771 batch_params.prim_user_data,
1772 );
1773
1774 // TODO(gw): As before, all pictures that get blitted are assumed
1775 // to have alpha. However, we could determine (at least for
1776 // simple, common cases) if the picture content is opaque.
1777 // That would allow inner segments of pictures to be drawn
1778 // with blend disabled, which is a big performance win on
1779 // integrated GPUs.
1780 let opacity = PrimitiveOpacity::translucent();
1781 let specified_blend_mode = BlendMode::PremultipliedAlpha;
1782
1783 self.add_segmented_prim_to_batch(
1784 segments,
1785 opacity,
1786 &batch_params,
1787 specified_blend_mode,
1788 non_segmented_blend_mode,
1789 batch_features,
1790 prim_header_index,
1791 bounding_rect,
1792 transform_kind,
1793 render_tasks,
1794 z_id,
1795 prim_info.clip_task_index,
1796 prim_vis_mask,
1797 ctx,
1798 );
1799 }
1800 PictureCompositeMode::SvgFilter(..) => {
1801 let kind = BatchKind::Brush(
1802 BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
1803 );
1804 let (uv_rect_address, textures) = render_tasks.resolve_surface(
1805 surface_task.expect("bug: surface must be allocated by now"),
1806 gpu_cache,
1807 );
1808 let key = BatchKey::new(
1809 kind,
1810 non_segmented_blend_mode,
1811 textures,
1812 );
1813 let prim_header_index = prim_headers.push(
1814 &prim_header,
1815 z_id,
1816 ImageBrushData {
1817 color_mode: ShaderColorMode::Image,
1818 alpha_type: AlphaType::PremultipliedAlpha,
1819 raster_space: RasterizationSpace::Screen,
1820 opacity: 1.0,
1821 }.encode(),
1822 );
1823
1824 self.add_brush_instance_to_batches(
1825 key,
1826 batch_features,
1827 bounding_rect,
1828 z_id,
1829 INVALID_SEGMENT_INDEX,
1830 EdgeAaSegmentMask::empty(),
1831 clip_task_address.unwrap(),
1832 brush_flags,
1833 prim_header_index,
1834 uv_rect_address.as_int(),
1835 prim_vis_mask,
1836 );
1837 }
1838 }
1839 }
1840 None => {
1841 // If this picture is being drawn into an existing target (i.e. with
1842 // no composition operation), recurse and add to the current batch list.
1843 self.add_pic_to_batch(
1844 picture,
1845 ctx,
1846 gpu_cache,
1847 render_tasks,
1848 deferred_resolves,
1849 prim_headers,
1850 transforms,
1851 root_spatial_node_index,
1852 surface_spatial_node_index,
1853 z_generator,
1854 composite_state,
1855 );
1856 }
1857 }
1858 }
1859 PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
1860 let prim_data = &ctx.data_stores.image_border[data_handle];
1861 let common_data = &prim_data.common;
1862 let border_data = &prim_data.kind;
1863
1864 let cache_item = resolve_image(
1865 border_data.request,
1866 ctx.resource_cache,
1867 gpu_cache,
1868 deferred_resolves,
1869 );
1870 if cache_item.texture_id == TextureSource::Invalid {
1871 return;
1872 }
1873
1874 let textures = BatchTextures::color(cache_item.texture_id);
1875 let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
1876 let specified_blend_mode = BlendMode::PremultipliedAlpha;
1877 let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
1878 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
1879 transform_kind == TransformedRectKind::Complex
1880 {
1881 specified_blend_mode
1882 } else {
1883 BlendMode::None
1884 };
1885
1886 let prim_header = PrimitiveHeader {
1887 local_rect: prim_rect,
1888 local_clip_rect: prim_info.combined_local_clip_rect,
1889 specific_prim_address: prim_cache_address,
1890 transform_id,
1891 };
1892
1893 let batch_params = BrushBatchParameters::shared(
1894 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
1895 textures,
1896 ImageBrushData {
1897 color_mode: ShaderColorMode::Image,
1898 alpha_type: AlphaType::PremultipliedAlpha,
1899 raster_space: RasterizationSpace::Local,
1900 opacity: 1.0,
1901 }.encode(),
1902 cache_item.uv_rect_handle.as_int(gpu_cache),
1903 );
1904
1905 let prim_header_index = prim_headers.push(
1906 &prim_header,
1907 z_id,
1908 batch_params.prim_user_data,
1909 );
1910
1911 self.add_segmented_prim_to_batch(
1912 Some(border_data.brush_segments.as_slice()),
1913 common_data.opacity,
1914 &batch_params,
1915 specified_blend_mode,
1916 non_segmented_blend_mode,
1917 batch_features,
1918 prim_header_index,
1919 bounding_rect,
1920 transform_kind,
1921 render_tasks,
1922 z_id,
1923 prim_info.clip_task_index,
1924 prim_vis_mask,
1925 ctx,
1926 );
1927 }
1928 PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, opacity_binding_index, .. } => {
1929 let prim_data = &ctx.data_stores.prim[data_handle];
1930 let specified_blend_mode = BlendMode::PremultipliedAlpha;
1931 let opacity_binding = ctx.prim_store.get_opacity_binding(opacity_binding_index);
1932
1933 let opacity = PrimitiveOpacity::from_alpha(opacity_binding);
1934 let opacity = opacity.combine(prim_data.opacity);
1935
1936 let non_segmented_blend_mode = if !opacity.is_opaque ||
1937 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
1938 transform_kind == TransformedRectKind::Complex
1939 {
1940 specified_blend_mode
1941 } else {
1942 BlendMode::None
1943 };
1944
1945 let batch_params = BrushBatchParameters::shared(
1946 BrushBatchKind::Solid,
1947 BatchTextures::no_texture(),
1948 [get_shader_opacity(opacity_binding), 0, 0, 0],
1949 0,
1950 );
1951
1952 let (prim_cache_address, segments) = if segment_instance_index == SegmentInstanceIndex::UNUSED {
1953 (gpu_cache.get_address(&prim_data.gpu_cache_handle), None)
1954 } else {
1955 let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
1956 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
1957 (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
1958 };
1959
1960 let prim_header = PrimitiveHeader {
1961 local_rect: prim_rect,
1962 local_clip_rect: prim_info.combined_local_clip_rect,
1963 specific_prim_address: prim_cache_address,
1964 transform_id,
1965 };
1966
1967 let prim_header_index = prim_headers.push(
1968 &prim_header,
1969 z_id,
1970 batch_params.prim_user_data,
1971 );
1972
1973 self.add_segmented_prim_to_batch(
1974 segments,
1975 opacity,
1976 &batch_params,
1977 specified_blend_mode,
1978 non_segmented_blend_mode,
1979 batch_features,
1980 prim_header_index,
1981 bounding_rect,
1982 transform_kind,
1983 render_tasks,
1984 z_id,
1985 prim_info.clip_task_index,
1986 prim_vis_mask,
1987 ctx,
1988 );
1989 }
1990 PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, is_compositor_surface, .. } => {
1991 if is_compositor_surface {
1992 self.emit_placeholder(prim_rect,
1993 prim_info,
1994 z_id,
1995 transform_id,
1996 batch_features,
1997 ctx,
1998 gpu_cache,
1999 render_tasks,
2000 prim_headers);
2001 return;
2002 }
2003
2004 let yuv_image_data = &ctx.data_stores.yuv_image[data_handle].kind;
2005 let mut textures = BatchTextures::no_texture();
2006 let mut uv_rect_addresses = [0; 3];
2007
2008 //yuv channel
2009 let channel_count = yuv_image_data.format.get_plane_num();
2010 debug_assert!(channel_count <= 3);
2011 for channel in 0 .. channel_count {
2012 let image_key = yuv_image_data.yuv_key[channel];
2013
2014 let cache_item = resolve_image(
2015 ImageRequest {
2016 key: image_key,
2017 rendering: yuv_image_data.image_rendering,
2018 tile: None,
2019 },
2020 ctx.resource_cache,
2021 gpu_cache,
2022 deferred_resolves,
2023 );
2024
2025 if cache_item.texture_id == TextureSource::Invalid {
2026 warn!("Warnings: skip a PrimitiveKind::YuvImage");
2027 return;
2028 }
2029
2030 textures.colors[channel] = cache_item.texture_id;
2031 uv_rect_addresses[channel] = cache_item.uv_rect_handle.as_int(gpu_cache);
2032 }
2033
2034 // All yuv textures should be the same type.
2035 let buffer_kind = get_buffer_kind(textures.colors[0]);
2036 assert!(
2037 textures.colors[1 .. yuv_image_data.format.get_plane_num()]
2038 .iter()
2039 .all(|&tid| buffer_kind == get_buffer_kind(tid))
2040 );
2041
2042 let kind = BrushBatchKind::YuvImage(
2043 buffer_kind,
2044 yuv_image_data.format,
2045 yuv_image_data.color_depth,
2046 yuv_image_data.color_space,
2047 yuv_image_data.color_range,
2048 );
2049
2050 let batch_params = BrushBatchParameters::shared(
2051 kind,
2052 textures,
2053 [
2054 uv_rect_addresses[0],
2055 uv_rect_addresses[1],
2056 uv_rect_addresses[2],
2057 0,
2058 ],
2059 0,
2060 );
2061
2062 let specified_blend_mode = BlendMode::PremultipliedAlpha;
2063 let prim_common_data = &ctx.data_stores.as_common_data(&prim_instance);
2064
2065 let non_segmented_blend_mode = if !prim_common_data.opacity.is_opaque ||
2066 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2067 transform_kind == TransformedRectKind::Complex
2068 {
2069 specified_blend_mode
2070 } else {
2071 BlendMode::None
2072 };
2073
2074 debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID);
2075 let (prim_cache_address, segments) = if segment_instance_index == SegmentInstanceIndex::UNUSED {
2076 (gpu_cache.get_address(&prim_common_data.gpu_cache_handle), None)
2077 } else {
2078 let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
2079 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
2080 (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
2081 };
2082
2083 let prim_header = PrimitiveHeader {
2084 local_rect: prim_rect,
2085 local_clip_rect: prim_info.combined_local_clip_rect,
2086 specific_prim_address: prim_cache_address,
2087 transform_id,
2088 };
2089
2090 let prim_header_index = prim_headers.push(
2091 &prim_header,
2092 z_id,
2093 batch_params.prim_user_data,
2094 );
2095
2096 self.add_segmented_prim_to_batch(
2097 segments,
2098 prim_common_data.opacity,
2099 &batch_params,
2100 specified_blend_mode,
2101 non_segmented_blend_mode,
2102 batch_features,
2103 prim_header_index,
2104 bounding_rect,
2105 transform_kind,
2106 render_tasks,
2107 z_id,
2108 prim_info.clip_task_index,
2109 prim_vis_mask,
2110 ctx,
2111 );
2112 }
2113 PrimitiveInstanceKind::Image { data_handle, image_instance_index, is_compositor_surface, .. } => {
2114 if is_compositor_surface {
2115 self.emit_placeholder(prim_rect,
2116 prim_info,
2117 z_id,
2118 transform_id,
2119 batch_features,
2120 ctx,
2121 gpu_cache,
2122 render_tasks,
2123 prim_headers);
2124 return;
2125 }
2126 let image_data = &ctx.data_stores.image[data_handle].kind;
2127 let common_data = &ctx.data_stores.image[data_handle].common;
2128 let image_instance = &ctx.prim_store.images[image_instance_index];
2129 let opacity_binding = ctx.prim_store.get_opacity_binding(image_instance.opacity_binding_index);
2130 let specified_blend_mode = match image_data.alpha_type {
2131 AlphaType::PremultipliedAlpha => BlendMode::PremultipliedAlpha,
2132 AlphaType::Alpha => BlendMode::Alpha,
2133 };
2134 let request = ImageRequest {
2135 key: image_data.key,
2136 rendering: image_data.image_rendering,
2137 tile: None,
2138 };
2139 let prim_user_data = ImageBrushData {
2140 color_mode: ShaderColorMode::Image,
2141 alpha_type: image_data.alpha_type,
2142 raster_space: RasterizationSpace::Local,
2143 opacity: opacity_binding,
2144 }.encode();
2145
2146 if image_instance.visible_tiles.is_empty() {
2147 let cache_item = match image_data.source {
2148 ImageSource::Default => {
2149 resolve_image(
2150 request,
2151 ctx.resource_cache,
2152 gpu_cache,
2153 deferred_resolves,
2154 )
2155 }
2156 ImageSource::Cache { ref handle, .. } => {
2157 let rt_handle = handle
2158 .as_ref()
2159 .expect("bug: render task handle not allocated");
2160 let rt_cache_entry = ctx.resource_cache
2161 .get_cached_render_task(rt_handle);
2162 ctx.resource_cache.get_texture_cache_item(&rt_cache_entry.handle)
2163 }
2164 };
2165
2166 if cache_item.texture_id == TextureSource::Invalid {
2167 return;
2168 }
2169
2170 let textures = BatchTextures::color(cache_item.texture_id);
2171
2172 let opacity = PrimitiveOpacity::from_alpha(opacity_binding);
2173 let opacity = opacity.combine(common_data.opacity);
2174
2175 let non_segmented_blend_mode = if !opacity.is_opaque ||
2176 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2177 transform_kind == TransformedRectKind::Complex
2178 {
2179 specified_blend_mode
2180 } else {
2181 BlendMode::None
2182 };
2183
2184 let batch_params = BrushBatchParameters::shared(
2185 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
2186 textures,
2187 prim_user_data,
2188 cache_item.uv_rect_handle.as_int(gpu_cache),
2189 );
2190
2191 debug_assert_ne!(image_instance.segment_instance_index, SegmentInstanceIndex::INVALID);
2192 let (prim_cache_address, segments) = if image_instance.segment_instance_index == SegmentInstanceIndex::UNUSED {
2193 (gpu_cache.get_address(&common_data.gpu_cache_handle), None)
2194 } else {
2195 let segment_instance = &ctx.scratch.segment_instances[image_instance.segment_instance_index];
2196 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
2197 (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
2198 };
2199
2200 let prim_header = PrimitiveHeader {
2201 local_rect: prim_rect,
2202 local_clip_rect: prim_info.combined_local_clip_rect,
2203 specific_prim_address: prim_cache_address,
2204 transform_id,
2205 };
2206
2207 let prim_header_index = prim_headers.push(
2208 &prim_header,
2209 z_id,
2210 batch_params.prim_user_data,
2211 );
2212
2213 self.add_segmented_prim_to_batch(
2214 segments,
2215 opacity,
2216 &batch_params,
2217 specified_blend_mode,
2218 non_segmented_blend_mode,
2219 batch_features,
2220 prim_header_index,
2221 bounding_rect,
2222 transform_kind,
2223 render_tasks,
2224 z_id,
2225 prim_info.clip_task_index,
2226 prim_vis_mask,
2227 ctx,
2228 );
2229 } else {
2230 const VECS_PER_SPECIFIC_BRUSH: usize = 3;
2231 let max_tiles_per_header = (MAX_VERTEX_TEXTURE_WIDTH - VECS_PER_SPECIFIC_BRUSH) / VECS_PER_SEGMENT;
2232
2233 // use temporary block storage since we don't know the number of visible tiles beforehand
2234 let mut gpu_blocks = Vec::<GpuBlockData>::new();
2235 for chunk in image_instance.visible_tiles.chunks(max_tiles_per_header) {
2236 gpu_blocks.clear();
2237 gpu_blocks.push(PremultipliedColorF::WHITE.into()); //color
2238 gpu_blocks.push(PremultipliedColorF::WHITE.into()); //bg color
2239 gpu_blocks.push([-1.0, 0.0, 0.0, 0.0].into()); //stretch size
2240 // negative first value makes the shader code ignore it and use the local size instead
2241 for tile in chunk {
2242 let tile_rect = tile.local_rect.translate(-prim_rect.origin.to_vector());
2243 gpu_blocks.push(tile_rect.into());
2244 gpu_blocks.push(GpuBlockData::EMPTY);
2245 }
2246
2247 let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks);
2248 let prim_header = PrimitiveHeader {
2249 local_rect: prim_rect,
2250 local_clip_rect: image_instance.tight_local_clip_rect,
2251 specific_prim_address: gpu_cache.get_address(&gpu_handle),
2252 transform_id,
2253 };
2254 let prim_header_index = prim_headers.push(&prim_header, z_id, prim_user_data);
2255
2256 for (i, tile) in chunk.iter().enumerate() {
2257 if let Some((batch_kind, textures, uv_rect_address)) = get_image_tile_params(
2258 ctx.resource_cache,
2259 gpu_cache,
2260 deferred_resolves,
2261 request.with_tile(tile.tile_offset),
2262 ) {
2263 let batch_key = BatchKey {
2264 blend_mode: specified_blend_mode,
2265 kind: BatchKind::Brush(batch_kind),
2266 textures,
2267 };
2268 self.add_brush_instance_to_batches(
2269 batch_key,
2270 batch_features,
2271 bounding_rect,
2272 z_id,
2273 i as i32,
2274 tile.edge_flags,
2275 clip_task_address.unwrap(),
2276 BrushFlags::SEGMENT_RELATIVE | BrushFlags::PERSPECTIVE_INTERPOLATION,
2277 prim_header_index,
2278 uv_rect_address.as_int(),
2279 prim_vis_mask,
2280 );
2281 }
2282 }
2283 }
2284 }
2285 }
2286 PrimitiveInstanceKind::LinearGradient { data_handle, gradient_index, .. } => {
2287 let gradient = &ctx.prim_store.linear_gradients[gradient_index];
2288 let prim_data = &ctx.data_stores.linear_grad[data_handle];
2289 let specified_blend_mode = BlendMode::PremultipliedAlpha;
2290
2291 let mut prim_header = PrimitiveHeader {
2292 local_rect: prim_rect,
2293 local_clip_rect: prim_info.combined_local_clip_rect,
2294 specific_prim_address: GpuCacheAddress::INVALID,
2295 transform_id,
2296 };
2297
2298 let non_segmented_blend_mode = if !prim_data.opacity.is_opaque ||
2299 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2300 transform_kind == TransformedRectKind::Complex
2301 {
2302 specified_blend_mode
2303 } else {
2304 BlendMode::None
2305 };
2306
2307 if !gradient.cache_segments.is_empty() {
2308
2309 for segment in &gradient.cache_segments {
2310 let ref cache_handle = segment.handle;
2311 let rt_cache_entry = ctx.resource_cache
2312 .get_cached_render_task(cache_handle);
2313 let cache_item = ctx.resource_cache
2314 .get_texture_cache_item(&rt_cache_entry.handle);
2315
2316 if cache_item.texture_id == TextureSource::Invalid {
2317 return;
2318 }
2319
2320 let textures = BatchTextures::color(cache_item.texture_id);
2321 let batch_kind = BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id));
2322 let prim_user_data = ImageBrushData {
2323 color_mode: ShaderColorMode::Image,
2324 alpha_type: AlphaType::PremultipliedAlpha,
2325 raster_space: RasterizationSpace::Local,
2326 opacity: 1.0,
2327 }.encode();
2328
2329 let specific_resource_address = cache_item.uv_rect_handle.as_int(gpu_cache);
2330 prim_header.specific_prim_address = gpu_cache.get_address(&ctx.globals.default_image_handle);
2331
2332 let segment_local_clip_rect = prim_header.local_clip_rect.intersection(&segment.local_rect);
2333 if segment_local_clip_rect.is_none() {
2334 continue;
2335 }
2336
2337 let segment_prim_header = PrimitiveHeader {
2338 local_rect: segment.local_rect,
2339 local_clip_rect: segment_local_clip_rect.unwrap(),
2340 specific_prim_address: prim_header.specific_prim_address,
2341 transform_id: prim_header.transform_id,
2342 };
2343
2344 let prim_header_index = prim_headers.push(
2345 &segment_prim_header,
2346 z_id,
2347 prim_user_data,
2348 );
2349
2350 let batch_key = BatchKey {
2351 blend_mode: non_segmented_blend_mode,
2352 kind: BatchKind::Brush(batch_kind),
2353 textures,
2354 };
2355
2356 self.add_brush_instance_to_batches(
2357 batch_key,
2358 batch_features,
2359 bounding_rect,
2360 z_id,
2361 INVALID_SEGMENT_INDEX,
2362 EdgeAaSegmentMask::all(),
2363 clip_task_address.unwrap(),
2364 BrushFlags::PERSPECTIVE_INTERPOLATION,
2365 prim_header_index,
2366 specific_resource_address,
2367 prim_vis_mask,
2368 );
2369 }
2370 } else if gradient.visible_tiles_range.is_empty() {
2371 let batch_params = BrushBatchParameters::shared(
2372 BrushBatchKind::LinearGradient,
2373 BatchTextures::no_texture(),
2374 [
2375 prim_data.stops_handle.as_int(gpu_cache),
2376 0,
2377 0,
2378 0,
2379 ],
2380 0,
2381 );
2382
2383 prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
2384
2385 let prim_header_index = prim_headers.push(
2386 &prim_header,
2387 z_id,
2388 batch_params.prim_user_data,
2389 );
2390
2391 let segments = if prim_data.brush_segments.is_empty() {
2392 None
2393 } else {
2394 Some(prim_data.brush_segments.as_slice())
2395 };
2396
2397 self.add_segmented_prim_to_batch(
2398 segments,
2399 prim_data.opacity,
2400 &batch_params,
2401 specified_blend_mode,
2402 non_segmented_blend_mode,
2403 batch_features,
2404 prim_header_index,
2405 bounding_rect,
2406 transform_kind,
2407 render_tasks,
2408 z_id,
2409 prim_info.clip_task_index,
2410 prim_vis_mask,
2411 ctx,
2412 );
2413 } else {
2414 let visible_tiles = &ctx.scratch.gradient_tiles[gradient.visible_tiles_range];
2415
2416 self.add_gradient_tiles(
2417 visible_tiles,
2418 &prim_data.stops_handle,
2419 BrushBatchKind::LinearGradient,
2420 specified_blend_mode,
2421 bounding_rect,
2422 clip_task_address.unwrap(),
2423 gpu_cache,
2424 &prim_header,
2425 prim_headers,
2426 z_id,
2427 prim_vis_mask,
2428 );
2429 }
2430 }
2431 PrimitiveInstanceKind::RadialGradient { data_handle, ref visible_tiles_range, .. } => {
2432 let prim_data = &ctx.data_stores.radial_grad[data_handle];
2433 let specified_blend_mode = BlendMode::PremultipliedAlpha;
2434
2435 let mut prim_header = PrimitiveHeader {
2436 local_rect: prim_rect,
2437 local_clip_rect: prim_info.combined_local_clip_rect,
2438 specific_prim_address: GpuCacheAddress::INVALID,
2439 transform_id,
2440 };
2441
2442 if visible_tiles_range.is_empty() {
2443 let non_segmented_blend_mode = if !prim_data.opacity.is_opaque ||
2444 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2445 transform_kind == TransformedRectKind::Complex
2446 {
2447 specified_blend_mode
2448 } else {
2449 BlendMode::None
2450 };
2451
2452 let batch_params = BrushBatchParameters::shared(
2453 BrushBatchKind::RadialGradient,
2454 BatchTextures::no_texture(),
2455 [
2456 prim_data.stops_handle.as_int(gpu_cache),
2457 0,
2458 0,
2459 0,
2460 ],
2461 0,
2462 );
2463
2464 prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
2465
2466 let prim_header_index = prim_headers.push(
2467 &prim_header,
2468 z_id,
2469 batch_params.prim_user_data,
2470 );
2471
2472 let segments = if prim_data.brush_segments.is_empty() {
2473 None
2474 } else {
2475 Some(prim_data.brush_segments.as_slice())
2476 };
2477
2478 self.add_segmented_prim_to_batch(
2479 segments,
2480 prim_data.opacity,
2481 &batch_params,
2482 specified_blend_mode,
2483 non_segmented_blend_mode,
2484 batch_features,
2485 prim_header_index,
2486 bounding_rect,
2487 transform_kind,
2488 render_tasks,
2489 z_id,
2490 prim_info.clip_task_index,
2491 prim_vis_mask,
2492 ctx,
2493 );
2494 } else {
2495 let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range];
2496
2497 self.add_gradient_tiles(
2498 visible_tiles,
2499 &prim_data.stops_handle,
2500 BrushBatchKind::RadialGradient,
2501 specified_blend_mode,
2502 bounding_rect,
2503 clip_task_address.unwrap(),
2504 gpu_cache,
2505 &prim_header,
2506 prim_headers,
2507 z_id,
2508 prim_vis_mask,
2509 );
2510 }
2511 }
2512 PrimitiveInstanceKind::ConicGradient { data_handle, ref visible_tiles_range, .. } => {
2513 let prim_data = &ctx.data_stores.conic_grad[data_handle];
2514 let specified_blend_mode = BlendMode::PremultipliedAlpha;
2515
2516 let mut prim_header = PrimitiveHeader {
2517 local_rect: prim_rect,
2518 local_clip_rect: prim_info.combined_local_clip_rect,
2519 specific_prim_address: GpuCacheAddress::INVALID,
2520 transform_id,
2521 };
2522
2523 if visible_tiles_range.is_empty() {
2524 let non_segmented_blend_mode = if !prim_data.opacity.is_opaque ||
2525 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2526 transform_kind == TransformedRectKind::Complex
2527 {
2528 specified_blend_mode
2529 } else {
2530 BlendMode::None
2531 };
2532
2533 let batch_params = BrushBatchParameters::shared(
2534 BrushBatchKind::ConicGradient,
2535 BatchTextures::no_texture(),
2536 [
2537 prim_data.stops_handle.as_int(gpu_cache),
2538 0,
2539 0,
2540 0,
2541 ],
2542 0,
2543 );
2544
2545 prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
2546
2547 let prim_header_index = prim_headers.push(
2548 &prim_header,
2549 z_id,
2550 batch_params.prim_user_data,
2551 );
2552
2553 let segments = if prim_data.brush_segments.is_empty() {
2554 None
2555 } else {
2556 Some(prim_data.brush_segments.as_slice())
2557 };
2558
2559 self.add_segmented_prim_to_batch(
2560 segments,
2561 prim_data.opacity,
2562 &batch_params,
2563 specified_blend_mode,
2564 non_segmented_blend_mode,
2565 batch_features,
2566 prim_header_index,
2567 bounding_rect,
2568 transform_kind,
2569 render_tasks,
2570 z_id,
2571 prim_info.clip_task_index,
2572 prim_vis_mask,
2573 ctx,
2574 );
2575 } else {
2576 let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range];
2577
2578 self.add_gradient_tiles(
2579 visible_tiles,
2580 &prim_data.stops_handle,
2581 BrushBatchKind::ConicGradient,
2582 specified_blend_mode,
2583 bounding_rect,
2584 clip_task_address.unwrap(),
2585 gpu_cache,
2586 &prim_header,
2587 prim_headers,
2588 z_id,
2589 prim_vis_mask,
2590 );
2591 }
2592 }
2593 PrimitiveInstanceKind::Backdrop { data_handle } => {
2594 let prim_data = &ctx.data_stores.backdrop[data_handle];
2595 let backdrop_pic_index = prim_data.kind.pic_index;
2596 let backdrop_surface_index = ctx.prim_store.pictures[backdrop_pic_index.0]
2597 .raster_config
2598 .as_ref()
2599 .expect("backdrop surface should be alloc by now")
2600 .surface_index;
2601
2602 let backdrop_task_id = ctx.surfaces[backdrop_surface_index.0]
2603 .render_tasks
2604 .as_ref()
2605 .expect("backdrop task not available")
2606 .root;
2607
2608 let backdrop_uv_rect_address = render_tasks[backdrop_task_id]
2609 .get_texture_address(gpu_cache)
2610 .as_int();
2611
2612 let textures = BatchTextures::render_target_cache();
2613 let batch_key = BatchKey::new(
2614 BatchKind::Brush(BrushBatchKind::Image(ImageBufferKind::Texture2DArray)),
2615 BlendMode::PremultipliedAlpha,
2616 textures,
2617 );
2618
2619 let prim_cache_address = gpu_cache.get_address(&ctx.globals.default_image_handle);
2620 let backdrop_picture = &ctx.prim_store.pictures[backdrop_pic_index.0];
2621 let prim_header = PrimitiveHeader {
2622 local_rect: backdrop_picture.precise_local_rect,
2623 local_clip_rect: prim_info.combined_local_clip_rect,
2624 transform_id,
2625 specific_prim_address: prim_cache_address,
2626 };
2627
2628 let prim_header_index = prim_headers.push(
2629 &prim_header,
2630 z_id,
2631 ImageBrushData {
2632 color_mode: ShaderColorMode::Image,
2633 alpha_type: AlphaType::PremultipliedAlpha,
2634 raster_space: RasterizationSpace::Screen,
2635 opacity: 1.0,
2636 }.encode(),
2637 );
2638
2639 self.add_brush_instance_to_batches(
2640 batch_key,
2641 batch_features,
2642 bounding_rect,
2643 z_id,
2644 INVALID_SEGMENT_INDEX,
2645 EdgeAaSegmentMask::empty(),
2646 OPAQUE_TASK_ADDRESS,
2647 BrushFlags::empty(),
2648 prim_header_index,
2649 backdrop_uv_rect_address,
2650 prim_vis_mask,
2651 );
2652 }
2653 }
2654 }
2655
2656 /// Add a single segment instance to a batch.
add_segment_to_batch( &mut self, segment: &BrushSegment, segment_data: &SegmentInstanceData, segment_index: i32, batch_kind: BrushBatchKind, prim_header_index: PrimitiveHeaderIndex, alpha_blend_mode: BlendMode, features: BatchFeatures, bounding_rect: &PictureRect, transform_kind: TransformedRectKind, render_tasks: &RenderTaskGraph, z_id: ZBufferId, prim_opacity: PrimitiveOpacity, clip_task_index: ClipTaskIndex, prim_vis_mask: PrimitiveVisibilityMask, ctx: &RenderTargetContext, )2657 fn add_segment_to_batch(
2658 &mut self,
2659 segment: &BrushSegment,
2660 segment_data: &SegmentInstanceData,
2661 segment_index: i32,
2662 batch_kind: BrushBatchKind,
2663 prim_header_index: PrimitiveHeaderIndex,
2664 alpha_blend_mode: BlendMode,
2665 features: BatchFeatures,
2666 bounding_rect: &PictureRect,
2667 transform_kind: TransformedRectKind,
2668 render_tasks: &RenderTaskGraph,
2669 z_id: ZBufferId,
2670 prim_opacity: PrimitiveOpacity,
2671 clip_task_index: ClipTaskIndex,
2672 prim_vis_mask: PrimitiveVisibilityMask,
2673 ctx: &RenderTargetContext,
2674 ) {
2675 debug_assert!(clip_task_index != ClipTaskIndex::INVALID);
2676
2677 // Get GPU address of clip task for this segment, or None if
2678 // the entire segment is clipped out.
2679 let clip_task_address = match ctx.get_clip_task_address(
2680 clip_task_index,
2681 segment_index,
2682 render_tasks,
2683 ) {
2684 Some(clip_task_address) => clip_task_address,
2685 None => return,
2686 };
2687
2688 // If a got a valid (or OPAQUE) clip task address, add the segment.
2689 let is_inner = segment.edge_flags.is_empty();
2690 let needs_blending = !prim_opacity.is_opaque ||
2691 clip_task_address != OPAQUE_TASK_ADDRESS ||
2692 (!is_inner && transform_kind == TransformedRectKind::Complex);
2693
2694 let batch_key = BatchKey {
2695 blend_mode: if needs_blending { alpha_blend_mode } else { BlendMode::None },
2696 kind: BatchKind::Brush(batch_kind),
2697 textures: segment_data.textures,
2698 };
2699
2700 self.add_brush_instance_to_batches(
2701 batch_key,
2702 features,
2703 bounding_rect,
2704 z_id,
2705 segment_index,
2706 segment.edge_flags,
2707 clip_task_address,
2708 BrushFlags::PERSPECTIVE_INTERPOLATION | segment.brush_flags,
2709 prim_header_index,
2710 segment_data.specific_resource_address,
2711 prim_vis_mask,
2712 );
2713 }
2714
2715 /// Add any segment(s) from a brush to batches.
add_segmented_prim_to_batch( &mut self, brush_segments: Option<&[BrushSegment]>, prim_opacity: PrimitiveOpacity, params: &BrushBatchParameters, alpha_blend_mode: BlendMode, non_segmented_blend_mode: BlendMode, features: BatchFeatures, prim_header_index: PrimitiveHeaderIndex, bounding_rect: &PictureRect, transform_kind: TransformedRectKind, render_tasks: &RenderTaskGraph, z_id: ZBufferId, clip_task_index: ClipTaskIndex, prim_vis_mask: PrimitiveVisibilityMask, ctx: &RenderTargetContext, )2716 fn add_segmented_prim_to_batch(
2717 &mut self,
2718 brush_segments: Option<&[BrushSegment]>,
2719 prim_opacity: PrimitiveOpacity,
2720 params: &BrushBatchParameters,
2721 alpha_blend_mode: BlendMode,
2722 non_segmented_blend_mode: BlendMode,
2723 features: BatchFeatures,
2724 prim_header_index: PrimitiveHeaderIndex,
2725 bounding_rect: &PictureRect,
2726 transform_kind: TransformedRectKind,
2727 render_tasks: &RenderTaskGraph,
2728 z_id: ZBufferId,
2729 clip_task_index: ClipTaskIndex,
2730 prim_vis_mask: PrimitiveVisibilityMask,
2731 ctx: &RenderTargetContext,
2732 ) {
2733 match (brush_segments, ¶ms.segment_data) {
2734 (Some(ref brush_segments), SegmentDataKind::Instanced(ref segment_data)) => {
2735 // In this case, we have both a list of segments, and a list of
2736 // per-segment instance data. Zip them together to build batches.
2737 debug_assert_eq!(brush_segments.len(), segment_data.len());
2738 for (segment_index, (segment, segment_data)) in brush_segments
2739 .iter()
2740 .zip(segment_data.iter())
2741 .enumerate()
2742 {
2743 self.add_segment_to_batch(
2744 segment,
2745 segment_data,
2746 segment_index as i32,
2747 params.batch_kind,
2748 prim_header_index,
2749 alpha_blend_mode,
2750 features,
2751 bounding_rect,
2752 transform_kind,
2753 render_tasks,
2754 z_id,
2755 prim_opacity,
2756 clip_task_index,
2757 prim_vis_mask,
2758 ctx,
2759 );
2760 }
2761 }
2762 (Some(ref brush_segments), SegmentDataKind::Shared(ref segment_data)) => {
2763 // A list of segments, but the per-segment data is common
2764 // between all segments.
2765 for (segment_index, segment) in brush_segments
2766 .iter()
2767 .enumerate()
2768 {
2769 self.add_segment_to_batch(
2770 segment,
2771 segment_data,
2772 segment_index as i32,
2773 params.batch_kind,
2774 prim_header_index,
2775 alpha_blend_mode,
2776 features,
2777 bounding_rect,
2778 transform_kind,
2779 render_tasks,
2780 z_id,
2781 prim_opacity,
2782 clip_task_index,
2783 prim_vis_mask,
2784 ctx,
2785 );
2786 }
2787 }
2788 (None, SegmentDataKind::Shared(ref segment_data)) => {
2789 // No segments, and thus no per-segment instance data.
2790 // Note: the blend mode already takes opacity into account
2791 let batch_key = BatchKey {
2792 blend_mode: non_segmented_blend_mode,
2793 kind: BatchKind::Brush(params.batch_kind),
2794 textures: segment_data.textures,
2795 };
2796 let clip_task_address = ctx.get_prim_clip_task_address(
2797 clip_task_index,
2798 render_tasks,
2799 ).unwrap();
2800 self.add_brush_instance_to_batches(
2801 batch_key,
2802 features,
2803 bounding_rect,
2804 z_id,
2805 INVALID_SEGMENT_INDEX,
2806 EdgeAaSegmentMask::all(),
2807 clip_task_address,
2808 BrushFlags::PERSPECTIVE_INTERPOLATION,
2809 prim_header_index,
2810 segment_data.specific_resource_address,
2811 prim_vis_mask,
2812 );
2813 }
2814 (None, SegmentDataKind::Instanced(..)) => {
2815 // We should never hit the case where there are no segments,
2816 // but a list of segment instance data.
2817 unreachable!();
2818 }
2819 }
2820 }
2821
add_gradient_tiles( &mut self, visible_tiles: &[VisibleGradientTile], stops_handle: &GpuCacheHandle, kind: BrushBatchKind, blend_mode: BlendMode, bounding_rect: &PictureRect, clip_task_address: RenderTaskAddress, gpu_cache: &GpuCache, base_prim_header: &PrimitiveHeader, prim_headers: &mut PrimitiveHeaders, z_id: ZBufferId, prim_vis_mask: PrimitiveVisibilityMask, )2822 fn add_gradient_tiles(
2823 &mut self,
2824 visible_tiles: &[VisibleGradientTile],
2825 stops_handle: &GpuCacheHandle,
2826 kind: BrushBatchKind,
2827 blend_mode: BlendMode,
2828 bounding_rect: &PictureRect,
2829 clip_task_address: RenderTaskAddress,
2830 gpu_cache: &GpuCache,
2831 base_prim_header: &PrimitiveHeader,
2832 prim_headers: &mut PrimitiveHeaders,
2833 z_id: ZBufferId,
2834 prim_vis_mask: PrimitiveVisibilityMask,
2835 ) {
2836 let key = BatchKey {
2837 blend_mode,
2838 kind: BatchKind::Brush(kind),
2839 textures: BatchTextures::no_texture(),
2840 };
2841
2842 let user_data = [stops_handle.as_int(gpu_cache), 0, 0, 0];
2843
2844 for tile in visible_tiles {
2845 let prim_header = PrimitiveHeader {
2846 specific_prim_address: gpu_cache.get_address(&tile.handle),
2847 local_rect: tile.local_rect,
2848 local_clip_rect: tile.local_clip_rect,
2849 ..*base_prim_header
2850 };
2851 let prim_header_index = prim_headers.push(&prim_header, z_id, user_data);
2852
2853 self.add_brush_instance_to_batches(
2854 key,
2855 BatchFeatures::empty(),
2856 bounding_rect,
2857 z_id,
2858 INVALID_SEGMENT_INDEX,
2859 EdgeAaSegmentMask::all(),
2860 clip_task_address,
2861 BrushFlags::PERSPECTIVE_INTERPOLATION,
2862 prim_header_index,
2863 0,
2864 prim_vis_mask,
2865 );
2866 }
2867 }
2868 }
2869
get_image_tile_params( resource_cache: &ResourceCache, gpu_cache: &mut GpuCache, deferred_resolves: &mut Vec<DeferredResolve>, request: ImageRequest, ) -> Option<(BrushBatchKind, BatchTextures, GpuCacheAddress)>2870 fn get_image_tile_params(
2871 resource_cache: &ResourceCache,
2872 gpu_cache: &mut GpuCache,
2873 deferred_resolves: &mut Vec<DeferredResolve>,
2874 request: ImageRequest,
2875 ) -> Option<(BrushBatchKind, BatchTextures, GpuCacheAddress)> {
2876
2877 let cache_item = resolve_image(
2878 request,
2879 resource_cache,
2880 gpu_cache,
2881 deferred_resolves,
2882 );
2883
2884 if cache_item.texture_id == TextureSource::Invalid {
2885 None
2886 } else {
2887 let textures = BatchTextures::color(cache_item.texture_id);
2888 Some((
2889 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
2890 textures,
2891 gpu_cache.get_address(&cache_item.uv_rect_handle),
2892 ))
2893 }
2894 }
2895
/// Either a single texture / user data for all segments,
/// or a list of one per segment.
enum SegmentDataKind {
    // One instance-data record shared by every segment of the primitive.
    Shared(SegmentInstanceData),
    // Per-segment instance data, kept parallel to the brush segment list
    // (zipped with it in add_segmented_prim_to_batch).
    Instanced(SmallVec<[SegmentInstanceData; 8]>),
}
2902
/// The parameters that are specific to a kind of brush,
/// used by the common method to add a brush to batches.
struct BrushBatchParameters {
    // Which brush shader/batch family these instances belong to.
    batch_kind: BrushBatchKind,
    // Per-primitive user data pushed alongside the primitive header.
    prim_user_data: [i32; 4],
    // Texture/resource data: shared across segments, or one entry per segment.
    segment_data: SegmentDataKind,
}
2910
2911 impl BrushBatchParameters {
2912 /// This brush instance has a list of per-segment
2913 /// instance data.
instanced( batch_kind: BrushBatchKind, prim_user_data: [i32; 4], segment_data: SmallVec<[SegmentInstanceData; 8]>, ) -> Self2914 fn instanced(
2915 batch_kind: BrushBatchKind,
2916 prim_user_data: [i32; 4],
2917 segment_data: SmallVec<[SegmentInstanceData; 8]>,
2918 ) -> Self {
2919 BrushBatchParameters {
2920 batch_kind,
2921 prim_user_data,
2922 segment_data: SegmentDataKind::Instanced(segment_data),
2923 }
2924 }
2925
2926 /// This brush instance shares the per-segment data
2927 /// across all segments.
shared( batch_kind: BrushBatchKind, textures: BatchTextures, prim_user_data: [i32; 4], specific_resource_address: i32, ) -> Self2928 fn shared(
2929 batch_kind: BrushBatchKind,
2930 textures: BatchTextures,
2931 prim_user_data: [i32; 4],
2932 specific_resource_address: i32,
2933 ) -> Self {
2934 BrushBatchParameters {
2935 batch_kind,
2936 prim_user_data,
2937 segment_data: SegmentDataKind::Shared(
2938 SegmentInstanceData {
2939 textures,
2940 specific_resource_address,
2941 }
2942 ),
2943 }
2944 }
2945 }
2946
2947 impl RenderTaskGraph {
resolve_surface( &self, task_id: RenderTaskId, gpu_cache: &GpuCache, ) -> (GpuCacheAddress, BatchTextures)2948 fn resolve_surface(
2949 &self,
2950 task_id: RenderTaskId,
2951 gpu_cache: &GpuCache,
2952 ) -> (GpuCacheAddress, BatchTextures) {
2953 (
2954 self[task_id].get_texture_address(gpu_cache),
2955 BatchTextures::render_target_cache(),
2956 )
2957 }
2958 }
2959
resolve_image( request: ImageRequest, resource_cache: &ResourceCache, gpu_cache: &mut GpuCache, deferred_resolves: &mut Vec<DeferredResolve>, ) -> CacheItem2960 pub fn resolve_image(
2961 request: ImageRequest,
2962 resource_cache: &ResourceCache,
2963 gpu_cache: &mut GpuCache,
2964 deferred_resolves: &mut Vec<DeferredResolve>,
2965 ) -> CacheItem {
2966 match resource_cache.get_image_properties(request.key) {
2967 Some(image_properties) => {
2968 // Check if an external image that needs to be resolved
2969 // by the render thread.
2970 match image_properties.external_image {
2971 Some(external_image) => {
2972 // This is an external texture - we will add it to
2973 // the deferred resolves list to be patched by
2974 // the render thread...
2975 let cache_handle = gpu_cache.push_deferred_per_frame_blocks(BLOCKS_PER_UV_RECT);
2976 let cache_item = CacheItem {
2977 texture_id: TextureSource::External(external_image),
2978 uv_rect_handle: cache_handle,
2979 uv_rect: DeviceIntRect::new(
2980 DeviceIntPoint::zero(),
2981 image_properties.descriptor.size,
2982 ),
2983 texture_layer: 0,
2984 };
2985
2986 deferred_resolves.push(DeferredResolve {
2987 image_properties,
2988 address: gpu_cache.get_address(&cache_handle),
2989 rendering: request.rendering,
2990 });
2991
2992 cache_item
2993 }
2994 None => {
2995 if let Ok(cache_item) = resource_cache.get_cached_image(request) {
2996 cache_item
2997 } else {
2998 // There is no usable texture entry for the image key. Just return an invalid texture here.
2999 CacheItem::invalid()
3000 }
3001 }
3002 }
3003 }
3004 None => {
3005 CacheItem::invalid()
3006 }
3007 }
3008 }
3009
/// A list of clip instances to be drawn into a target.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipBatchList {
    /// Rectangle draws fill up the rectangles with rounded corners.
    /// These use the general-purpose (slow path) rectangle shader.
    pub slow_rectangles: Vec<ClipMaskInstance>,
    /// Rounded-rectangle clips that qualify for the fast-path shader
    /// (selected via `ClipNodeFlags::USE_FAST_PATH`).
    pub fast_rectangles: Vec<ClipMaskInstance>,
    /// Image draws apply the image masking.
    /// Keyed by source texture so draws from the same texture batch together.
    pub images: FastHashMap<TextureSource, Vec<ClipMaskInstance>>,
    /// Box-shadow mask draws, also keyed by the cached mask's texture.
    pub box_shadows: FastHashMap<TextureSource, Vec<ClipMaskInstance>>,
}
3022
3023 impl ClipBatchList {
new() -> Self3024 fn new() -> Self {
3025 ClipBatchList {
3026 slow_rectangles: Vec::new(),
3027 fast_rectangles: Vec::new(),
3028 images: FastHashMap::default(),
3029 box_shadows: FastHashMap::default(),
3030 }
3031 }
3032 }
3033
/// Batcher managing draw calls into the clip mask (in the RT cache).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipBatcher {
    /// The first clip in each clip task. This will overwrite all pixels
    /// in the clip region, so we can skip doing a clear and write with
    /// blending disabled, which is a big performance win on Intel GPUs.
    pub primary_clips: ClipBatchList,
    /// Any subsequent clip masks (rare) for a clip task get drawn in
    /// a second pass with multiplicative blending enabled.
    pub secondary_clips: ClipBatchList,

    /// Whether the GPU clears fast enough that the primary/secondary
    /// split above is unnecessary (see `get_batch_list`).
    gpu_supports_fast_clears: bool,
}
3049
impl ClipBatcher {
    /// Construct an empty batcher. `gpu_supports_fast_clears` selects
    /// whether first clips can skip the primary-pass optimization.
    pub fn new(
        gpu_supports_fast_clears: bool,
    ) -> Self {
        ClipBatcher {
            primary_clips: ClipBatchList::new(),
            secondary_clips: ClipBatchList::new(),
            gpu_supports_fast_clears,
        }
    }

    /// Push a single rectangular clip region (identity transforms) into
    /// the primary clip list, to be drawn with the slow rectangle shader.
    pub fn add_clip_region(
        &mut self,
        clip_data_address: GpuCacheAddress,
        local_pos: LayoutPoint,
        sub_rect: DeviceRect,
        task_origin: DevicePoint,
        screen_origin: DevicePoint,
        device_pixel_scale: f32,
    ) {
        let instance = ClipMaskInstance {
            clip_transform_id: TransformPaletteId::IDENTITY,
            prim_transform_id: TransformPaletteId::IDENTITY,
            clip_data_address,
            // No image / box-shadow resource for a plain region.
            resource_address: GpuCacheAddress::INVALID,
            local_pos,
            tile_rect: LayoutRect::zero(),
            sub_rect,
            task_origin,
            screen_origin,
            device_pixel_scale,
        };

        self.primary_clips.slow_rectangles.push(instance);
    }

    /// Where appropriate, draw a clip rectangle as a small series of tiles,
    /// instead of one large rectangle.
    ///
    /// Returns true if the clip was handled here (tiled), false if the
    /// caller should fall back to drawing one full-size mask instance.
    fn add_tiled_clip_mask(
        &mut self,
        mask_screen_rect: DeviceIntRect,
        local_clip_rect: LayoutRect,
        clip_spatial_node_index: SpatialNodeIndex,
        spatial_tree: &SpatialTree,
        world_rect: &WorldRect,
        device_pixel_scale: DevicePixelScale,
        gpu_address: GpuCacheAddress,
        instance: &ClipMaskInstance,
        is_first_clip: bool,
    ) -> bool {
        // Only try to draw in tiles if the clip mark is big enough.
        if mask_screen_rect.area() < CLIP_RECTANGLE_AREA_THRESHOLD {
            return false;
        }

        let clip_spatial_node = &spatial_tree
            .spatial_nodes[clip_spatial_node_index.0 as usize];

        // Only support clips that are axis-aligned to the root coordinate space,
        // for now, to simplify the logic below. This handles the vast majority
        // of real world cases, but could be expanded in future if needed.
        if clip_spatial_node.coordinate_system_id != CoordinateSystemId::root() {
            return false;
        }

        // Get the world rect of the clip rectangle. If we can't transform it due
        // to the matrix, just fall back to drawing the entire clip mask.
        let transform = spatial_tree.get_world_transform(
            clip_spatial_node_index,
        );
        let world_clip_rect = match project_rect(
            &transform.into_transform(),
            &local_clip_rect,
            world_rect,
        ) {
            Some(rect) => rect,
            None => return false,
        };

        // Work out how many tiles to draw this clip mask in, stretched across the
        // device rect of the primitive clip mask.
        let world_device_rect = world_clip_rect * device_pixel_scale;
        // Round up so partial tiles at the right/bottom edges are covered.
        let x_tiles = (mask_screen_rect.size.width + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;
        let y_tiles = (mask_screen_rect.size.height + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;

        // Because we only run this code path for axis-aligned rects (the root coord system check above),
        // and only for rectangles (not rounded etc), the world_device_rect is not conservative - we know
        // that there is no inner_rect, and the world_device_rect should be the real, axis-aligned clip rect.
        let mask_origin = mask_screen_rect.origin.to_f32().to_vector();
        let clip_list = self.get_batch_list(is_first_clip);

        for y in 0 .. y_tiles {
            for x in 0 .. x_tiles {
                // Tile bounds in mask-local device space; p1 is clamped
                // so edge tiles don't extend past the mask rect.
                let p0 = DeviceIntPoint::new(
                    x * CLIP_RECTANGLE_TILE_SIZE,
                    y * CLIP_RECTANGLE_TILE_SIZE,
                );
                let p1 = DeviceIntPoint::new(
                    (p0.x + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect.size.width),
                    (p0.y + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect.size.height),
                );
                let normalized_sub_rect = DeviceIntRect::new(
                    p0,
                    DeviceIntSize::new(
                        p1.x - p0.x,
                        p1.y - p0.y,
                    ),
                ).to_f32();
                let world_sub_rect = normalized_sub_rect.translate(mask_origin);

                // If the clip rect completely contains this tile rect, then drawing
                // these pixels would be redundant - since this clip can't possibly
                // affect the pixels in this tile, skip them!
                if !world_device_rect.contains_rect(&world_sub_rect) {
                    clip_list.slow_rectangles.push(ClipMaskInstance {
                        clip_data_address: gpu_address,
                        sub_rect: normalized_sub_rect,
                        local_pos: local_clip_rect.origin,
                        ..*instance
                    });
                }
            }
        }

        true
    }

    /// Retrieve the correct clip batch list to append to, depending
    /// on whether this is the first clip mask for a clip task.
    fn get_batch_list(
        &mut self,
        is_first_clip: bool,
    ) -> &mut ClipBatchList {
        // The primary (overwrite, no-blend) pass only pays off when the GPU
        // clears slowly; with fast clears everything goes to the blended pass.
        if is_first_clip && !self.gpu_supports_fast_clears {
            &mut self.primary_clips
        } else {
            &mut self.secondary_clips
        }
    }

    /// Batch a range of clip nodes into clip mask instances for a clip task.
    ///
    /// Walks `clip_node_range`, building one or more `ClipMaskInstance`s per
    /// clip item (image mask, box shadow, rectangle, rounded rectangle), and
    /// appends them to the primary or secondary list via `get_batch_list`.
    pub fn add(
        &mut self,
        clip_node_range: ClipNodeRange,
        root_spatial_node_index: SpatialNodeIndex,
        resource_cache: &ResourceCache,
        gpu_cache: &GpuCache,
        clip_store: &ClipStore,
        spatial_tree: &SpatialTree,
        transforms: &mut TransformPalette,
        clip_data_store: &ClipDataStore,
        actual_rect: DeviceIntRect,
        world_rect: &WorldRect,
        device_pixel_scale: DevicePixelScale,
        task_origin: DevicePoint,
        screen_origin: DevicePoint,
    ) {
        // Tracks whether the next clip drawn is the first for this task
        // (the first one can use the non-blended primary pass).
        let mut is_first_clip = true;

        for i in 0 .. clip_node_range.count {
            let clip_instance = clip_store.get_instance_from_range(&clip_node_range, i);
            let clip_node = &clip_data_store[clip_instance.handle];

            // Transform palette ids for the clip's and primitive's spatial
            // nodes, both relative to the root.
            let clip_transform_id = transforms.get_id(
                clip_instance.spatial_node_index,
                ROOT_SPATIAL_NODE_INDEX,
                spatial_tree,
            );

            let prim_transform_id = transforms.get_id(
                root_spatial_node_index,
                ROOT_SPATIAL_NODE_INDEX,
                spatial_tree,
            );

            // Template instance; each clip-kind arm below fills in the
            // addresses / positions it needs via struct update syntax.
            let instance = ClipMaskInstance {
                clip_transform_id,
                prim_transform_id,
                clip_data_address: GpuCacheAddress::INVALID,
                resource_address: GpuCacheAddress::INVALID,
                local_pos: LayoutPoint::zero(),
                tile_rect: LayoutRect::zero(),
                sub_rect: DeviceRect::new(
                    DevicePoint::zero(),
                    actual_rect.size.to_f32(),
                ),
                task_origin,
                screen_origin,
                device_pixel_scale: device_pixel_scale.0,
            };

            // `added_clip` records whether this item emitted any draw, which
            // in turn decides if the next clip still counts as "first".
            let added_clip = match clip_node.item.kind {
                ClipItemKind::Image { image, rect, .. } => {
                    let request = ImageRequest {
                        key: image,
                        rendering: ImageRendering::Auto,
                        tile: None,
                    };

                    let clip_data_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);

                    // Helper: push one image-mask instance for a (possibly
                    // tiled) request; skips quietly if the image isn't cached.
                    let mut add_image = |request: ImageRequest, local_tile_rect: LayoutRect| {
                        let cache_item = match resource_cache.get_cached_image(request) {
                            Ok(item) => item,
                            Err(..) => {
                                warn!("Warnings: skip a image mask");
                                debug!("request: {:?}", request);
                                return;
                            }
                        };

                        self.get_batch_list(is_first_clip)
                            .images
                            .entry(cache_item.texture_id)
                            .or_insert_with(Vec::new)
                            .push(ClipMaskInstance {
                                clip_data_address,
                                resource_address: gpu_cache.get_address(&cache_item.uv_rect_handle),
                                tile_rect: local_tile_rect,
                                local_pos: rect.origin,
                                ..instance
                            });
                    };

                    // Tiled image masks emit one instance per visible tile.
                    match clip_instance.visible_tiles {
                        Some(ref tiles) => {
                            for tile in tiles {
                                add_image(
                                    request.with_tile(tile.tile_offset),
                                    tile.tile_rect,
                                )
                            }
                        }
                        None => {
                            add_image(request, rect)
                        }
                    }

                    true
                }
                ClipItemKind::BoxShadow { ref source } => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    // The box-shadow mask must already have been rendered
                    // into the texture cache by an earlier pass.
                    let rt_handle = source
                        .cache_handle
                        .as_ref()
                        .expect("bug: render task handle not allocated");
                    let rt_cache_entry = resource_cache
                        .get_cached_render_task(rt_handle);
                    let cache_item = resource_cache
                        .get_texture_cache_item(&rt_cache_entry.handle);
                    debug_assert_ne!(cache_item.texture_id, TextureSource::Invalid);

                    self.get_batch_list(is_first_clip)
                        .box_shadows
                        .entry(cache_item.texture_id)
                        .or_insert_with(Vec::new)
                        .push(ClipMaskInstance {
                            clip_data_address: gpu_address,
                            resource_address: gpu_cache.get_address(&cache_item.uv_rect_handle),
                            ..instance
                        });

                    true
                }
                ClipItemKind::Rectangle { rect, mode: ClipMode::ClipOut } => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    self.get_batch_list(is_first_clip)
                        .slow_rectangles
                        .push(ClipMaskInstance {
                            local_pos: rect.origin,
                            clip_data_address: gpu_address,
                            ..instance
                        });

                    true
                }
                ClipItemKind::Rectangle { rect, mode: ClipMode::Clip } => {
                    // Same-coordinate-system rect clips are handled elsewhere
                    // (no mask needed), so nothing is drawn here.
                    if clip_instance.flags.contains(ClipNodeFlags::SAME_COORD_SYSTEM) {
                        false
                    } else {
                        let gpu_address = gpu_cache.get_address(&clip_node.gpu_cache_handle);

                        // Prefer tiled drawing for large rects; fall back to
                        // one full-mask instance if tiling doesn't apply.
                        if !self.add_tiled_clip_mask(
                            actual_rect,
                            rect,
                            clip_instance.spatial_node_index,
                            spatial_tree,
                            world_rect,
                            device_pixel_scale,
                            gpu_address,
                            &instance,
                            is_first_clip,
                        ) {
                            self.get_batch_list(is_first_clip)
                                .slow_rectangles
                                .push(ClipMaskInstance {
                                    clip_data_address: gpu_address,
                                    local_pos: rect.origin,
                                    ..instance
                                });
                        }

                        true
                    }
                }
                ClipItemKind::RoundedRectangle { rect, .. } => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    let batch_list = self.get_batch_list(is_first_clip);
                    let instance = ClipMaskInstance {
                        clip_data_address: gpu_address,
                        local_pos: rect.origin,
                        ..instance
                    };
                    // Fast-path shader when the clip node qualifies.
                    if clip_instance.flags.contains(ClipNodeFlags::USE_FAST_PATH) {
                        batch_list.fast_rectangles.push(instance);
                    } else {
                        batch_list.slow_rectangles.push(instance);
                    }

                    true
                }
            };

            // Once any clip has been drawn, subsequent clips are no longer
            // "first" and must go to the blended (secondary) pass.
            is_first_clip &= !added_clip;
        }
    }
}
3380
3381 // TODO(gw): This should probably be a method on TextureSource
get_buffer_kind(texture: TextureSource) -> ImageBufferKind3382 pub fn get_buffer_kind(texture: TextureSource) -> ImageBufferKind {
3383 match texture {
3384 TextureSource::External(ext_image) => {
3385 match ext_image.image_type {
3386 ExternalImageType::TextureHandle(target) => {
3387 target.into()
3388 }
3389 ExternalImageType::Buffer => {
3390 // The ExternalImageType::Buffer should be handled by resource_cache.
3391 // It should go through the non-external case.
3392 panic!("Unexpected non-texture handle type");
3393 }
3394 }
3395 }
3396 _ => ImageBufferKind::Texture2DArray,
3397 }
3398 }
3399
3400 impl<'a, 'rc> RenderTargetContext<'a, 'rc> {
3401 /// Retrieve the GPU task address for a given clip task instance.
3402 /// Returns None if the segment was completely clipped out.
3403 /// Returns Some(OPAQUE_TASK_ADDRESS) if no clip mask is needed.
3404 /// Returns Some(task_address) if there was a valid clip mask.
get_clip_task_address( &self, clip_task_index: ClipTaskIndex, offset: i32, render_tasks: &RenderTaskGraph, ) -> Option<RenderTaskAddress>3405 fn get_clip_task_address(
3406 &self,
3407 clip_task_index: ClipTaskIndex,
3408 offset: i32,
3409 render_tasks: &RenderTaskGraph,
3410 ) -> Option<RenderTaskAddress> {
3411 let address = match self.scratch.clip_mask_instances[clip_task_index.0 as usize + offset as usize] {
3412 ClipMaskKind::Mask(task_id) => {
3413 render_tasks.get_task_address(task_id)
3414 }
3415 ClipMaskKind::None => {
3416 OPAQUE_TASK_ADDRESS
3417 }
3418 ClipMaskKind::Clipped => {
3419 return None;
3420 }
3421 };
3422
3423 Some(address)
3424 }
3425
3426 /// Helper function to get the clip task address for a
3427 /// non-segmented primitive.
get_prim_clip_task_address( &self, clip_task_index: ClipTaskIndex, render_tasks: &RenderTaskGraph, ) -> Option<RenderTaskAddress>3428 fn get_prim_clip_task_address(
3429 &self,
3430 clip_task_index: ClipTaskIndex,
3431 render_tasks: &RenderTaskGraph,
3432 ) -> Option<RenderTaskAddress> {
3433 self.get_clip_task_address(
3434 clip_task_index,
3435 0,
3436 render_tasks,
3437 )
3438 }
3439 }
3440