1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 use api::{AlphaType, ClipMode, DeviceIntRect, DeviceIntPoint, DeviceIntSize, WorldRect};
6 use api::{ExternalImageType, FilterOp, ImageRendering, LayoutRect, DeviceRect, DevicePixelScale};
7 use api::{YuvColorSpace, YuvFormat, PictureRect, ColorDepth, LayoutPoint, DevicePoint, LayoutSize};
8 use api::{PremultipliedColorF};
9 use clip::{ClipDataStore, ClipNodeFlags, ClipNodeRange, ClipItem, ClipStore, ClipNodeInstance};
10 use clip_scroll_tree::{ClipScrollTree, ROOT_SPATIAL_NODE_INDEX, SpatialNodeIndex, CoordinateSystemId};
11 use glyph_rasterizer::GlyphFormat;
12 use gpu_cache::{GpuBlockData, GpuCache, GpuCacheHandle, GpuCacheAddress};
13 use gpu_types::{BrushFlags, BrushInstance, PrimitiveHeaders, ZBufferId, ZBufferIdGenerator};
14 use gpu_types::{ClipMaskInstance, SplitCompositeInstance, SnapOffsets};
15 use gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance};
16 use gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette};
17 use internal_types::{FastHashMap, SavedTargetIndex, TextureSource};
18 use picture::{Picture3DContext, PictureCompositeMode, PicturePrimitive, PictureSurface};
19 use prim_store::{DeferredResolve, EdgeAaSegmentMask, PrimitiveInstanceKind, PrimitiveVisibilityIndex};
20 use prim_store::{VisibleGradientTile, PrimitiveInstance, PrimitiveOpacity, SegmentInstanceIndex};
21 use prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex};
22 use prim_store::image::ImageSource;
23 use render_backend::DataStores;
24 use render_task::{RenderTaskAddress, RenderTaskId, RenderTaskTree, TileBlit};
25 use renderer::{BlendMode, ImageBufferKind, ShaderColorMode};
26 use renderer::{BLOCKS_PER_UV_RECT, MAX_VERTEX_TEXTURE_WIDTH};
27 use resource_cache::{CacheItem, GlyphFetchResult, ImageRequest, ResourceCache, ImageProperties};
28 use scene::FilterOpHelpers;
29 use smallvec::SmallVec;
30 use std::{f32, i32, usize};
31 use tiling::{RenderTargetContext};
32 use util::{project_rect, TransformedRectKind};
33
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fff);

/// Used to signal there are no segments provided with this primitive.
/// Passed as the `segment_index` of a `BrushInstance` (see `BrushInstance`
/// construction below).
const INVALID_SEGMENT_INDEX: i32 = 0xffff;

/// Size in device pixels for tiles that clip masks are drawn in.
const CLIP_RECTANGLE_TILE_SIZE: i32 = 128;

/// The minimum size of a clip mask before trying to draw in tiles.
/// Equal to the area of 4 clip-mask tiles.
const CLIP_RECTANGLE_AREA_THRESHOLD: i32 = CLIP_RECTANGLE_TILE_SIZE * CLIP_RECTANGLE_TILE_SIZE * 4;
46
/// The specific kind of brush shader a batch is drawn with.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BrushBatchKind {
    Solid,
    Image(ImageBufferKind),
    Blend,
    /// Mix-blend composite; carries the render tasks involved.
    /// NOTE(review): field semantics inferred from names only — the
    /// source/backdrop resolution happens elsewhere in this file.
    MixBlend {
        task_id: RenderTaskId,
        source_id: RenderTaskId,
        backdrop_id: RenderTaskId,
    },
    YuvImage(ImageBufferKind, YuvFormat, ColorDepth, YuvColorSpace),
    RadialGradient,
    LinearGradient,
}
63
/// Top-level shader family used to draw a batch.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BatchKind {
    SplitComposite,
    TextRun(GlyphFormat),
    Brush(BrushBatchKind),
}
72
/// Optional textures that can be used as a source in the shaders.
/// Textures that are not used by the batch are equal to TextureId::invalid().
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchTextures {
    // Up to three color-texture slots; unused slots hold
    // `TextureSource::Invalid` (see the constructors below).
    pub colors: [TextureSource; 3],
}
81
82 impl BatchTextures {
no_texture() -> Self83 pub fn no_texture() -> Self {
84 BatchTextures {
85 colors: [TextureSource::Invalid; 3],
86 }
87 }
88
render_target_cache() -> Self89 pub fn render_target_cache() -> Self {
90 BatchTextures {
91 colors: [
92 TextureSource::PrevPassColor,
93 TextureSource::PrevPassAlpha,
94 TextureSource::Invalid,
95 ],
96 }
97 }
98
color(texture: TextureSource) -> Self99 pub fn color(texture: TextureSource) -> Self {
100 BatchTextures {
101 colors: [texture, texture, TextureSource::Invalid],
102 }
103 }
104 }
105
/// Identifies a batch a primitive instance can be merged into:
/// the shader kind, blend mode and the source textures.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchKey {
    pub kind: BatchKind,
    pub blend_mode: BlendMode,
    pub textures: BatchTextures,
}
114
115 impl BatchKey {
new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self116 pub fn new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self {
117 BatchKey {
118 kind,
119 blend_mode,
120 textures,
121 }
122 }
123
is_compatible_with(&self, other: &BatchKey) -> bool124 pub fn is_compatible_with(&self, other: &BatchKey) -> bool {
125 self.kind == other.kind && self.blend_mode == other.blend_mode &&
126 textures_compatible(self.textures.colors[0], other.textures.colors[0]) &&
127 textures_compatible(self.textures.colors[1], other.textures.colors[1]) &&
128 textures_compatible(self.textures.colors[2], other.textures.colors[2])
129 }
130 }
131
132 #[inline]
textures_compatible(t1: TextureSource, t2: TextureSource) -> bool133 fn textures_compatible(t1: TextureSource, t2: TextureSource) -> bool {
134 t1 == TextureSource::Invalid || t2 == TextureSource::Invalid || t1 == t2
135 }
136
/// Accumulates batches of blended (alpha) primitives, which must
/// respect draw order; see `set_params_and_get_batch` for the
/// overlap-aware batch selection.
pub struct AlphaBatchList {
    pub batches: Vec<PrimitiveBatch>,
    /// For each batch, the picture-space rects of the items already
    /// added to it, used for overlap tests during batch selection.
    pub item_rects: Vec<Vec<PictureRect>>,
    /// Index of the batch used by the previous instance, or
    /// `usize::MAX` when no batch has been selected yet.
    current_batch_index: usize,
    /// Z id of the previous instance; a change forces a re-selection.
    current_z_id: ZBufferId,
}
143
impl AlphaBatchList {
    /// Creates an empty list. `usize::MAX` marks "no current batch",
    /// so the first `set_params_and_get_batch` call always selects one.
    fn new() -> Self {
        AlphaBatchList {
            batches: Vec::new(),
            item_rects: Vec::new(),
            current_z_id: ZBufferId::invalid(),
            current_batch_index: usize::MAX,
        }
    }

    /// Returns the instance list of a batch compatible with `key`,
    /// creating a new batch if necessary.
    ///
    /// The current batch is reused when the key is compatible and the
    /// z id is unchanged. Otherwise at most the 10 most recent batches
    /// are searched, and the search is cut short as soon as merging
    /// into an earlier batch could reorder overlapping items.
    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        bounding_rect: &PictureRect,
        z_id: ZBufferId,
    ) -> &mut Vec<PrimitiveInstanceData> {
        if z_id != self.current_z_id ||
           self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key)
        {
            let mut selected_batch_index = None;

            match key.blend_mode {
                BlendMode::SubpixelWithBgColor => {
                    'outer_multipass: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(10) {
                        // Some subpixel batches are drawn in two passes. Because of this, we need
                        // to check for overlaps with every batch (which is a bit different
                        // than the normal batching below).
                        // NOTE: unlike the default arm, the overlap test runs
                        // *before* the compatibility test, so even a compatible
                        // batch is rejected if anything in it overlaps.
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(bounding_rect) {
                                break 'outer_multipass;
                            }
                        }

                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }
                    }
                }
                _ => {
                    'outer_default: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(10) {
                        // For normal batches, we only need to check for overlaps for batches
                        // other than the first batch we consider. If the first batch
                        // is compatible, then we know there isn't any potential overlap
                        // issues to worry about.
                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }

                        // check for intersections
                        for item_rect in &self.item_rects[batch_index] {
                            if item_rect.intersects(bounding_rect) {
                                break 'outer_default;
                            }
                        }
                    }
                }
            }

            // No suitable batch found within the search window: start a
            // fresh batch (and a matching, empty item-rect list).
            if selected_batch_index.is_none() {
                let new_batch = PrimitiveBatch::new(key);
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
                self.item_rects.push(Vec::new());
            }

            self.current_batch_index = selected_batch_index.unwrap();
            self.item_rects[self.current_batch_index].push(*bounding_rect);
            self.current_z_id = z_id;
        }

        &mut self.batches[self.current_batch_index].instances
    }
}
220
/// Accumulates batches of opaque primitives. These are drawn
/// front-to-back for z-buffer efficiency (see `finalize`), so batch
/// selection needs no overlap tracking.
pub struct OpaqueBatchList {
    /// Primitives whose area exceeds this threshold only consider the
    /// most recent batch, rather than searching back through several.
    pub pixel_area_threshold_for_new_batch: f32,
    pub batches: Vec<PrimitiveBatch>,
    /// Index of the batch used by the previous instance, or
    /// `usize::MAX` when no batch has been selected yet.
    pub current_batch_index: usize,
}
226
227 impl OpaqueBatchList {
new(pixel_area_threshold_for_new_batch: f32) -> Self228 fn new(pixel_area_threshold_for_new_batch: f32) -> Self {
229 OpaqueBatchList {
230 batches: Vec::new(),
231 pixel_area_threshold_for_new_batch,
232 current_batch_index: usize::MAX,
233 }
234 }
235
set_params_and_get_batch( &mut self, key: BatchKey, bounding_rect: &PictureRect, ) -> &mut Vec<PrimitiveInstanceData>236 pub fn set_params_and_get_batch(
237 &mut self,
238 key: BatchKey,
239 bounding_rect: &PictureRect,
240 ) -> &mut Vec<PrimitiveInstanceData> {
241 if self.current_batch_index == usize::MAX ||
242 !self.batches[self.current_batch_index].key.is_compatible_with(&key) {
243 let mut selected_batch_index = None;
244 let item_area = bounding_rect.size.area();
245
246 // If the area of this primitive is larger than the given threshold,
247 // then it is large enough to warrant breaking a batch for. In this
248 // case we just see if it can be added to the existing batch or
249 // create a new one.
250 if item_area > self.pixel_area_threshold_for_new_batch {
251 if let Some(batch) = self.batches.last() {
252 if batch.key.is_compatible_with(&key) {
253 selected_batch_index = Some(self.batches.len() - 1);
254 }
255 }
256 } else {
257 // Otherwise, look back through a reasonable number of batches.
258 for (batch_index, batch) in self.batches.iter().enumerate().rev().take(10) {
259 if batch.key.is_compatible_with(&key) {
260 selected_batch_index = Some(batch_index);
261 break;
262 }
263 }
264 }
265
266 if selected_batch_index.is_none() {
267 let new_batch = PrimitiveBatch::new(key);
268 selected_batch_index = Some(self.batches.len());
269 self.batches.push(new_batch);
270 }
271
272 self.current_batch_index = selected_batch_index.unwrap();
273 }
274
275 &mut self.batches[self.current_batch_index].instances
276 }
277
finalize(&mut self)278 fn finalize(&mut self) {
279 // Reverse the instance arrays in the opaque batches
280 // to get maximum z-buffer efficiency by drawing
281 // front-to-back.
282 // TODO(gw): Maybe we can change the batch code to
283 // build these in reverse and avoid having
284 // to reverse the instance array here.
285 for batch in &mut self.batches {
286 batch.instances.reverse();
287 }
288 }
289 }
290
/// Pairs an alpha and an opaque batch list for one region set,
/// routing each instance by its blend mode.
pub struct BatchList {
    pub alpha_batch_list: AlphaBatchList,
    pub opaque_batch_list: OpaqueBatchList,
    /// A list of rectangle regions this batch should be drawn
    /// in. Each region will have scissor rect set before drawing.
    pub regions: Vec<DeviceIntRect>,
    pub tile_blits: Vec<TileBlit>,
}
299
300 impl BatchList {
new( screen_size: DeviceIntSize, regions: Vec<DeviceIntRect>, tile_blits: Vec<TileBlit>, ) -> Self301 pub fn new(
302 screen_size: DeviceIntSize,
303 regions: Vec<DeviceIntRect>,
304 tile_blits: Vec<TileBlit>,
305 ) -> Self {
306 // The threshold for creating a new batch is
307 // one quarter the screen size.
308 let batch_area_threshold = (screen_size.width * screen_size.height) as f32 / 4.0;
309
310 BatchList {
311 alpha_batch_list: AlphaBatchList::new(),
312 opaque_batch_list: OpaqueBatchList::new(batch_area_threshold),
313 regions,
314 tile_blits,
315 }
316 }
317
push_single_instance( &mut self, key: BatchKey, bounding_rect: &PictureRect, z_id: ZBufferId, instance: PrimitiveInstanceData, )318 pub fn push_single_instance(
319 &mut self,
320 key: BatchKey,
321 bounding_rect: &PictureRect,
322 z_id: ZBufferId,
323 instance: PrimitiveInstanceData,
324 ) {
325 match key.blend_mode {
326 BlendMode::None => {
327 self.opaque_batch_list
328 .set_params_and_get_batch(key, bounding_rect)
329 .push(instance);
330 }
331 BlendMode::Alpha |
332 BlendMode::PremultipliedAlpha |
333 BlendMode::PremultipliedDestOut |
334 BlendMode::SubpixelConstantTextColor(..) |
335 BlendMode::SubpixelWithBgColor |
336 BlendMode::SubpixelDualSource => {
337 self.alpha_batch_list
338 .set_params_and_get_batch(key, bounding_rect, z_id)
339 .push(instance);
340 }
341 }
342 }
343
set_params_and_get_batch( &mut self, key: BatchKey, bounding_rect: &PictureRect, z_id: ZBufferId, ) -> &mut Vec<PrimitiveInstanceData>344 pub fn set_params_and_get_batch(
345 &mut self,
346 key: BatchKey,
347 bounding_rect: &PictureRect,
348 z_id: ZBufferId,
349 ) -> &mut Vec<PrimitiveInstanceData> {
350 match key.blend_mode {
351 BlendMode::None => {
352 self.opaque_batch_list
353 .set_params_and_get_batch(key, bounding_rect)
354 }
355 BlendMode::Alpha |
356 BlendMode::PremultipliedAlpha |
357 BlendMode::PremultipliedDestOut |
358 BlendMode::SubpixelConstantTextColor(..) |
359 BlendMode::SubpixelWithBgColor |
360 BlendMode::SubpixelDualSource => {
361 self.alpha_batch_list
362 .set_params_and_get_batch(key, bounding_rect, z_id)
363 }
364 }
365 }
366
finalize(&mut self)367 fn finalize(&mut self) {
368 self.opaque_batch_list.finalize()
369 }
370 }
371
/// A batch key together with the instance data of every primitive
/// added under that key.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveBatch {
    pub key: BatchKey,
    pub instances: Vec<PrimitiveInstanceData>,
}
378
379 impl PrimitiveBatch {
new(key: BatchKey) -> PrimitiveBatch380 fn new(key: BatchKey) -> PrimitiveBatch {
381 PrimitiveBatch {
382 key,
383 instances: Vec::new(),
384 }
385 }
386 }
387
/// The finished set of batches for one render task: opaque and alpha
/// batches plus the scissor/region state they must be drawn with.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct AlphaBatchContainer {
    pub opaque_batches: Vec<PrimitiveBatch>,
    pub alpha_batches: Vec<PrimitiveBatch>,
    /// The overall scissor rect for this render task, if one
    /// is required.
    pub task_scissor_rect: Option<DeviceIntRect>,
    /// A list of rectangle regions this batch should be drawn
    /// in. Each region will have scissor rect set before drawing.
    pub regions: Vec<DeviceIntRect>,
    pub tile_blits: Vec<TileBlit>,
}
401
impl AlphaBatchContainer {
    /// Creates an empty container with the given scissor/region state.
    pub fn new(
        task_scissor_rect: Option<DeviceIntRect>,
        regions: Vec<DeviceIntRect>,
    ) -> AlphaBatchContainer {
        AlphaBatchContainer {
            opaque_batches: Vec::new(),
            alpha_batches: Vec::new(),
            task_scissor_rect,
            regions,
            tile_blits: Vec::new(),
        }
    }

    /// True when neither opaque nor alpha batches were produced.
    pub fn is_empty(&self) -> bool {
        self.opaque_batches.is_empty() &&
        self.alpha_batches.is_empty()
    }

    /// Folds a finished `BatchList` into this container, merging
    /// instances into existing compatible batches where possible.
    fn merge(&mut self, batch_list: BatchList) {
        // Opaque batches can merge into any compatible existing batch:
        // their relative order doesn't matter (depth-tested).
        for other_batch in batch_list.opaque_batch_list.batches {
            let batch_index = self.opaque_batches.iter().position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    self.opaque_batches[batch_index].instances.extend(other_batch.instances);
                }
                None => {
                    self.opaque_batches.push(other_batch);
                }
            }
        }

        // Alpha batches are order-sensitive. `min_batch_index` only ever
        // moves forward, so each incoming batch merges at or after the
        // position of the previously merged one, preserving the incoming
        // list's relative draw order.
        let mut min_batch_index = 0;

        for other_batch in batch_list.alpha_batch_list.batches {
            let batch_index = self.alpha_batches.iter().skip(min_batch_index).position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    // `position` was taken after `skip`, so re-offset it.
                    let batch_index = batch_index + min_batch_index;
                    self.alpha_batches[batch_index].instances.extend(other_batch.instances);
                    min_batch_index = batch_index;
                }
                None => {
                    self.alpha_batches.push(other_batch);
                    min_batch_index = self.alpha_batches.len();
                }
            }
        }
    }
}
458
/// Each segment can optionally specify a per-segment
/// texture set and one user data field.
#[derive(Debug, Copy, Clone)]
struct SegmentInstanceData {
    // Texture sources the segment samples from.
    textures: BatchTextures,
    // Free-form per-segment integer passed through to the shader
    // (e.g. a UV-rect GPU cache address, see the border path below).
    user_data: i32,
}
466
/// Encapsulates the logic of building batches for items that are blended.
pub struct AlphaBatchBuilder {
    /// One `BatchList` per region/tile-blit set; the last entry is the
    /// list currently being built.
    pub batch_lists: Vec<BatchList>,
    screen_size: DeviceIntSize,
    /// Optional scissor rect applied to every container produced by
    /// this builder (also disables merging, see `can_merge`).
    task_scissor_rect: Option<DeviceIntRect>,
    /// Scratch buffer reused across text runs when fetching glyphs.
    glyph_fetch_buffer: Vec<GlyphFetchResult>,
}
474
475 impl AlphaBatchBuilder {
new( screen_size: DeviceIntSize, task_scissor_rect: Option<DeviceIntRect>, ) -> Self476 pub fn new(
477 screen_size: DeviceIntSize,
478 task_scissor_rect: Option<DeviceIntRect>,
479 ) -> Self {
480 let batch_lists = vec![
481 BatchList::new(
482 screen_size,
483 Vec::new(),
484 Vec::new(),
485 ),
486 ];
487
488 AlphaBatchBuilder {
489 batch_lists,
490 task_scissor_rect,
491 screen_size,
492 glyph_fetch_buffer: Vec::new(),
493 }
494 }
495
push_new_batch_list( &mut self, regions: Vec<DeviceIntRect>, tile_blits: Vec<TileBlit>, )496 fn push_new_batch_list(
497 &mut self,
498 regions: Vec<DeviceIntRect>,
499 tile_blits: Vec<TileBlit>,
500 ) {
501 self.batch_lists.push(BatchList::new(
502 self.screen_size,
503 regions,
504 tile_blits,
505 ));
506 }
507
    /// The batch list currently being built. There is always at least
    /// one list (pushed in `new`), so the `unwrap` cannot fail.
    fn current_batch_list(&mut self) -> &mut BatchList {
        self.batch_lists.last_mut().unwrap()
    }
511
can_merge(&self) -> bool512 fn can_merge(&self) -> bool {
513 self.task_scissor_rect.is_none() &&
514 self.batch_lists.len() == 1
515 }
516
build( mut self, batch_containers: &mut Vec<AlphaBatchContainer>, merged_batches: &mut AlphaBatchContainer, )517 pub fn build(
518 mut self,
519 batch_containers: &mut Vec<AlphaBatchContainer>,
520 merged_batches: &mut AlphaBatchContainer,
521 ) {
522 for batch_list in &mut self.batch_lists {
523 batch_list.finalize();
524 }
525
526 if self.can_merge() {
527 let batch_list = self.batch_lists.pop().unwrap();
528 debug_assert!(batch_list.tile_blits.is_empty());
529 merged_batches.merge(batch_list);
530 } else {
531 for batch_list in self.batch_lists {
532 batch_containers.push(AlphaBatchContainer {
533 alpha_batches: batch_list.alpha_batch_list.batches,
534 opaque_batches: batch_list.opaque_batch_list.batches,
535 task_scissor_rect: self.task_scissor_rect,
536 regions: batch_list.regions,
537 tile_blits: batch_list.tile_blits,
538 });
539 }
540 }
541 }
542
add_pic_to_batch( &mut self, pic: &PicturePrimitive, task_id: RenderTaskId, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskTree, deferred_resolves: &mut Vec<DeferredResolve>, prim_headers: &mut PrimitiveHeaders, transforms: &mut TransformPalette, root_spatial_node_index: SpatialNodeIndex, z_generator: &mut ZBufferIdGenerator, )543 pub fn add_pic_to_batch(
544 &mut self,
545 pic: &PicturePrimitive,
546 task_id: RenderTaskId,
547 ctx: &RenderTargetContext,
548 gpu_cache: &mut GpuCache,
549 render_tasks: &RenderTaskTree,
550 deferred_resolves: &mut Vec<DeferredResolve>,
551 prim_headers: &mut PrimitiveHeaders,
552 transforms: &mut TransformPalette,
553 root_spatial_node_index: SpatialNodeIndex,
554 z_generator: &mut ZBufferIdGenerator,
555 ) {
556 let task_address = render_tasks.get_task_address(task_id);
557
558 // Add each run in this picture to the batch.
559 for prim_instance in &pic.prim_list.prim_instances {
560 self.add_prim_to_batch(
561 prim_instance,
562 ctx,
563 gpu_cache,
564 render_tasks,
565 task_id,
566 task_address,
567 deferred_resolves,
568 prim_headers,
569 transforms,
570 root_spatial_node_index,
571 z_generator,
572 );
573 }
574 }
575
576 // Adds a primitive to a batch.
577 // It can recursively call itself in some situations, for
578 // example if it encounters a picture where the items
579 // in that picture are being drawn into the same target.
add_prim_to_batch( &mut self, prim_instance: &PrimitiveInstance, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskTree, task_id: RenderTaskId, task_address: RenderTaskAddress, deferred_resolves: &mut Vec<DeferredResolve>, prim_headers: &mut PrimitiveHeaders, transforms: &mut TransformPalette, root_spatial_node_index: SpatialNodeIndex, z_generator: &mut ZBufferIdGenerator, )580 fn add_prim_to_batch(
581 &mut self,
582 prim_instance: &PrimitiveInstance,
583 ctx: &RenderTargetContext,
584 gpu_cache: &mut GpuCache,
585 render_tasks: &RenderTaskTree,
586 task_id: RenderTaskId,
587 task_address: RenderTaskAddress,
588 deferred_resolves: &mut Vec<DeferredResolve>,
589 prim_headers: &mut PrimitiveHeaders,
590 transforms: &mut TransformPalette,
591 root_spatial_node_index: SpatialNodeIndex,
592 z_generator: &mut ZBufferIdGenerator,
593 ) {
594 if prim_instance.visibility_info == PrimitiveVisibilityIndex::INVALID {
595 return;
596 }
597
598 #[cfg(debug_assertions)] //TODO: why is this needed?
599 debug_assert_eq!(prim_instance.prepared_frame_id, render_tasks.frame_id());
600
601 let is_chased = prim_instance.is_chased();
602
603 let transform_id = transforms
604 .get_id(
605 prim_instance.spatial_node_index,
606 root_spatial_node_index,
607 ctx.clip_scroll_tree,
608 );
609
610 // TODO(gw): Calculating this for every primitive is a bit
611 // wasteful. We should probably cache this in
612 // the scroll node...
613 let transform_kind = transform_id.transform_kind();
614 let prim_info = &ctx.scratch.prim_info[prim_instance.visibility_info.0 as usize];
615 let bounding_rect = &prim_info.clip_chain.pic_clip_rect;
616
617 let z_id = z_generator.next();
618
619 // Get the clip task address for the global primitive, if one was set.
620 let clip_task_address = get_clip_task_address(
621 &ctx.scratch.clip_mask_instances,
622 prim_info.clip_task_index,
623 0,
624 render_tasks,
625 ).unwrap_or(OPAQUE_TASK_ADDRESS);
626
627 let prim_common_data = &ctx.data_stores.as_common_data(&prim_instance);
628 let prim_rect = LayoutRect::new(
629 prim_instance.prim_origin,
630 prim_common_data.prim_size,
631 );
632
633 if is_chased {
634 println!("\tbatch {:?} with clip {:?} and bound {:?}",
635 prim_rect, clip_task_address, bounding_rect);
636 }
637
638
639 match prim_instance.kind {
640 PrimitiveInstanceKind::Clear { data_handle } => {
641 let prim_data = &ctx.data_stores.prim[data_handle];
642 let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
643
644 // TODO(gw): We can abstract some of the common code below into
645 // helper methods, as we port more primitives to make
646 // use of interning.
647
648 let prim_header = PrimitiveHeader {
649 local_rect: prim_rect,
650 local_clip_rect: prim_info.combined_local_clip_rect,
651 task_address,
652 specific_prim_address: prim_cache_address,
653 clip_task_address,
654 transform_id,
655 };
656
657 let prim_header_index = prim_headers.push(
658 &prim_header,
659 z_id,
660 [get_shader_opacity(1.0), 0, 0],
661 );
662
663 let batch_key = BatchKey {
664 blend_mode: BlendMode::PremultipliedDestOut,
665 kind: BatchKind::Brush(BrushBatchKind::Solid),
666 textures: BatchTextures::no_texture(),
667 };
668
669 let instance = PrimitiveInstanceData::from(BrushInstance {
670 segment_index: INVALID_SEGMENT_INDEX,
671 edge_flags: EdgeAaSegmentMask::all(),
672 clip_task_address,
673 brush_flags: BrushFlags::PERSPECTIVE_INTERPOLATION,
674 prim_header_index,
675 user_data: 0,
676 });
677
678 self.current_batch_list().push_single_instance(
679 batch_key,
680 bounding_rect,
681 z_id,
682 PrimitiveInstanceData::from(instance),
683 );
684 }
685 PrimitiveInstanceKind::NormalBorder { data_handle, ref cache_handles, .. } => {
686 let prim_data = &ctx.data_stores.normal_border[data_handle];
687 let common_data = &prim_data.common;
688 let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
689 let cache_handles = &ctx.scratch.border_cache_handles[*cache_handles];
690 let specified_blend_mode = BlendMode::PremultipliedAlpha;
691 let mut segment_data: SmallVec<[SegmentInstanceData; 8]> = SmallVec::new();
692
693 // Collect the segment instance data from each render
694 // task for each valid edge / corner of the border.
695
696 for handle in cache_handles {
697 let rt_cache_entry = ctx.resource_cache
698 .get_cached_render_task(handle);
699 let cache_item = ctx.resource_cache
700 .get_texture_cache_item(&rt_cache_entry.handle);
701 segment_data.push(
702 SegmentInstanceData {
703 textures: BatchTextures::color(cache_item.texture_id),
704 user_data: cache_item.uv_rect_handle.as_int(gpu_cache),
705 }
706 );
707 }
708
709 let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
710 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
711 transform_kind == TransformedRectKind::Complex
712 {
713 specified_blend_mode
714 } else {
715 BlendMode::None
716 };
717
718 let prim_header = PrimitiveHeader {
719 local_rect: prim_rect,
720 local_clip_rect: prim_info.combined_local_clip_rect,
721 task_address,
722 specific_prim_address: prim_cache_address,
723 clip_task_address,
724 transform_id,
725 };
726
727 let batch_params = BrushBatchParameters::instanced(
728 BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
729 [
730 ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
731 RasterizationSpace::Local as i32,
732 get_shader_opacity(1.0),
733 ],
734 segment_data,
735 );
736
737 let prim_header_index = prim_headers.push(
738 &prim_header,
739 z_id,
740 batch_params.prim_user_data,
741 );
742
743 let border_data = &prim_data.kind;
744 self.add_segmented_prim_to_batch(
745 Some(border_data.brush_segments.as_slice()),
746 common_data.opacity,
747 &batch_params,
748 specified_blend_mode,
749 non_segmented_blend_mode,
750 prim_header_index,
751 clip_task_address,
752 bounding_rect,
753 transform_kind,
754 render_tasks,
755 z_id,
756 prim_info.clip_task_index,
757 ctx,
758 );
759 }
760 PrimitiveInstanceKind::TextRun { data_handle, run_index, .. } => {
761 let run = &ctx.prim_store.text_runs[run_index];
762 let subpx_dir = run.used_font.get_subpx_dir();
763
764 // The GPU cache data is stored in the template and reused across
765 // frames and display lists.
766 let prim_data = &ctx.data_stores.text_run[data_handle];
767 let glyph_fetch_buffer = &mut self.glyph_fetch_buffer;
768 let alpha_batch_list = &mut self.batch_lists.last_mut().unwrap().alpha_batch_list;
769 let prim_cache_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
770
771 let prim_header = PrimitiveHeader {
772 local_rect: prim_rect,
773 local_clip_rect: prim_info.combined_local_clip_rect,
774 task_address,
775 specific_prim_address: prim_cache_address,
776 clip_task_address,
777 transform_id,
778 };
779
780 let glyph_keys = &ctx.scratch.glyph_keys[run.glyph_keys_range];
781
782 ctx.resource_cache.fetch_glyphs(
783 run.used_font.clone(),
784 &glyph_keys,
785 glyph_fetch_buffer,
786 gpu_cache,
787 |texture_id, mut glyph_format, glyphs| {
788 debug_assert_ne!(texture_id, TextureSource::Invalid);
789
790 // Ignore color and only sample alpha when shadowing.
791 if run.shadow {
792 glyph_format = glyph_format.ignore_color();
793 }
794
795 let subpx_dir = subpx_dir.limit_by(glyph_format);
796
797 let textures = BatchTextures {
798 colors: [
799 texture_id,
800 TextureSource::Invalid,
801 TextureSource::Invalid,
802 ],
803 };
804
805 let kind = BatchKind::TextRun(glyph_format);
806
807 let (blend_mode, color_mode) = match glyph_format {
808 GlyphFormat::Subpixel |
809 GlyphFormat::TransformedSubpixel => {
810 if run.used_font.bg_color.a != 0 {
811 (
812 BlendMode::SubpixelWithBgColor,
813 ShaderColorMode::FromRenderPassMode,
814 )
815 } else if ctx.use_dual_source_blending {
816 (
817 BlendMode::SubpixelDualSource,
818 ShaderColorMode::SubpixelDualSource,
819 )
820 } else {
821 (
822 BlendMode::SubpixelConstantTextColor(run.used_font.color.into()),
823 ShaderColorMode::SubpixelConstantTextColor,
824 )
825 }
826 }
827 GlyphFormat::Alpha |
828 GlyphFormat::TransformedAlpha => {
829 (
830 BlendMode::PremultipliedAlpha,
831 ShaderColorMode::Alpha,
832 )
833 }
834 GlyphFormat::Bitmap => {
835 (
836 BlendMode::PremultipliedAlpha,
837 ShaderColorMode::Bitmap,
838 )
839 }
840 GlyphFormat::ColorBitmap => {
841 (
842 BlendMode::PremultipliedAlpha,
843 ShaderColorMode::ColorBitmap,
844 )
845 }
846 };
847
848 let prim_header_index = prim_headers.push(
849 &prim_header,
850 z_id,
851 [
852 (run.reference_frame_relative_offset.x * 256.0) as i32,
853 (run.reference_frame_relative_offset.y * 256.0) as i32,
854 run.raster_space as i32,
855 ],
856 );
857 let key = BatchKey::new(kind, blend_mode, textures);
858 let base_instance = GlyphInstance::new(
859 prim_header_index,
860 );
861 let batch = alpha_batch_list.set_params_and_get_batch(
862 key,
863 bounding_rect,
864 z_id,
865 );
866
867 for glyph in glyphs {
868 batch.push(base_instance.build(
869 glyph.index_in_text_run,
870 glyph.uv_rect_address.as_int(),
871 (subpx_dir as u32 as i32) << 16 |
872 (color_mode as u32 as i32),
873 ));
874 }
875 },
876 );
877 }
878 PrimitiveInstanceKind::LineDecoration { data_handle, ref cache_handle, .. } => {
879 // The GPU cache data is stored in the template and reused across
880 // frames and display lists.
881 let common_data = &ctx.data_stores.line_decoration[data_handle].common;
882 let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
883
884 let (batch_kind, textures, prim_user_data, segment_user_data) = match cache_handle {
885 Some(cache_handle) => {
886 let rt_cache_entry = ctx
887 .resource_cache
888 .get_cached_render_task(cache_handle);
889 let cache_item = ctx
890 .resource_cache
891 .get_texture_cache_item(&rt_cache_entry.handle);
892 let textures = BatchTextures::color(cache_item.texture_id);
893 (
894 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
895 textures,
896 [
897 ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
898 RasterizationSpace::Local as i32,
899 get_shader_opacity(1.0),
900 ],
901 cache_item.uv_rect_handle.as_int(gpu_cache),
902 )
903 }
904 None => {
905 (
906 BrushBatchKind::Solid,
907 BatchTextures::no_texture(),
908 [get_shader_opacity(1.0), 0, 0],
909 0,
910 )
911 }
912 };
913
914 // TODO(gw): We can abstract some of the common code below into
915 // helper methods, as we port more primitives to make
916 // use of interning.
917 let blend_mode = if !common_data.opacity.is_opaque ||
918 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
919 transform_kind == TransformedRectKind::Complex
920 {
921 BlendMode::PremultipliedAlpha
922 } else {
923 BlendMode::None
924 };
925
926 let prim_header = PrimitiveHeader {
927 local_rect: prim_rect,
928 local_clip_rect: prim_info.combined_local_clip_rect,
929 task_address,
930 specific_prim_address: prim_cache_address,
931 clip_task_address,
932 transform_id,
933 };
934
935 let prim_header_index = prim_headers.push(
936 &prim_header,
937 z_id,
938 prim_user_data,
939 );
940
941 let batch_key = BatchKey {
942 blend_mode,
943 kind: BatchKind::Brush(batch_kind),
944 textures: textures,
945 };
946
947 let instance = PrimitiveInstanceData::from(BrushInstance {
948 segment_index: INVALID_SEGMENT_INDEX,
949 edge_flags: EdgeAaSegmentMask::all(),
950 clip_task_address,
951 brush_flags: BrushFlags::PERSPECTIVE_INTERPOLATION,
952 prim_header_index,
953 user_data: segment_user_data,
954 });
955
956 self.current_batch_list().push_single_instance(
957 batch_key,
958 bounding_rect,
959 z_id,
960 PrimitiveInstanceData::from(instance),
961 );
962 }
PrimitiveInstanceKind::Picture { pic_index, .. } => {
    // Batch a picture primitive. Depending on its state this either:
    //  - flattens the children of a preserve-3D context root into
    //    SplitComposite batches (first `match` below), and/or
    //  - composites the picture's off-screen surface (tile cache,
    //    filter, mix-blend or blit) via the second `match`, or
    //  - recurses into the child primitive list when the picture has
    //    no surface of its own (`raster_config == None`).
    let picture = &ctx.prim_store.pictures[pic_index.0];
    // Surfaces are composited with premultiplied alpha in the
    // non-segmented paths below.
    let non_segmented_blend_mode = BlendMode::PremultipliedAlpha;
    // Default image GPU data; used as the specific prim address by the
    // tile-cache path below (other paths build their own headers).
    let prim_cache_address = gpu_cache.get_address(&ctx.globals.default_image_handle);

    let prim_header = PrimitiveHeader {
        local_rect: picture.local_rect,
        local_clip_rect: prim_info.combined_local_clip_rect,
        task_address,
        specific_prim_address: prim_cache_address,
        clip_task_address,
        transform_id,
    };

    match picture.context_3d {
        // Convert all children of the 3D hierarchy root into batches.
        Picture3DContext::In { root_data: Some(ref list), .. } => {
            for child in list {
                // `child.anchor` indexes into this picture's own
                // primitive list; look up the child's visibility info.
                let prim_instance = &picture.prim_list.prim_instances[child.anchor];
                let prim_info = &ctx.scratch.prim_info[prim_instance.visibility_info.0 as usize];

                // Children of a preserve-3D root are always pictures;
                // any other primitive kind here is a scene-building bug.
                let child_pic_index = match prim_instance.kind {
                    PrimitiveInstanceKind::Picture { pic_index, .. } => pic_index,
                    PrimitiveInstanceKind::LineDecoration { .. } |
                    PrimitiveInstanceKind::TextRun { .. } |
                    PrimitiveInstanceKind::NormalBorder { .. } |
                    PrimitiveInstanceKind::ImageBorder { .. } |
                    PrimitiveInstanceKind::Rectangle { .. } |
                    PrimitiveInstanceKind::YuvImage { .. } |
                    PrimitiveInstanceKind::Image { .. } |
                    PrimitiveInstanceKind::LinearGradient { .. } |
                    PrimitiveInstanceKind::RadialGradient { .. } |
                    PrimitiveInstanceKind::Clear { .. } => {
                        unreachable!();
                    }
                };
                let pic = &ctx.prim_store.pictures[child_pic_index.0];


                // Get clip task, if set, for the picture primitive.
                let clip_task_address = get_clip_task_address(
                    &ctx.scratch.clip_mask_instances,
                    prim_info.clip_task_index,
                    0,
                    render_tasks,
                ).unwrap_or(OPAQUE_TASK_ADDRESS);

                // Per-child header: the transform is resolved for the
                // child's own spatial node, not the root picture's.
                let prim_header = PrimitiveHeader {
                    local_rect: pic.local_rect,
                    local_clip_rect: prim_info.combined_local_clip_rect,
                    task_address,
                    specific_prim_address: GpuCacheAddress::invalid(),
                    clip_task_address,
                    transform_id: transforms
                        .get_id(
                            child.spatial_node_index,
                            root_spatial_node_index,
                            ctx.clip_scroll_tree,
                        ),
                };

                // Every 3D child must have been given an off-screen
                // surface during frame building.
                let raster_config = pic
                    .raster_config
                    .as_ref()
                    .expect("BUG: 3d primitive was not assigned a surface");
                let (uv_rect_address, _) = ctx
                    .surfaces[raster_config.surface_index.0]
                    .surface
                    .as_ref()
                    .expect("BUG: no surface")
                    .resolve(
                        render_tasks,
                        ctx.resource_cache,
                        gpu_cache,
                    );

                let prim_header_index = prim_headers.push(&prim_header, z_id, [
                    uv_rect_address.as_int(),
                    if raster_config.establishes_raster_root { 1 } else { 0 },
                    0,
                ]);

                let key = BatchKey::new(
                    BatchKind::SplitComposite,
                    BlendMode::PremultipliedAlpha,
                    BatchTextures::no_texture(),
                );

                let instance = SplitCompositeInstance::new(
                    prim_header_index,
                    child.gpu_address,
                    z_id,
                );

                self.current_batch_list().push_single_instance(
                    key,
                    &prim_info.clip_chain.pic_clip_rect,
                    z_id,
                    PrimitiveInstanceData::from(instance),
                );
            }
        }
        // Ignore the 3D pictures that are not in the root of preserve-3D
        // hierarchy, since we process them with the root.
        Picture3DContext::In { root_data: None, .. } => return,
        // Proceed for non-3D pictures.
        Picture3DContext::Out => ()
    }

    match picture.raster_config {
        Some(ref raster_config) => {
            // If the child picture was rendered in local space, we can safely
            // interpolate the UV coordinates with perspective correction.
            let brush_flags = if raster_config.establishes_raster_root {
                BrushFlags::PERSPECTIVE_INTERPOLATION
            } else {
                BrushFlags::empty()
            };

            match raster_config.composite_mode {
                PictureCompositeMode::TileCache { .. } => {
                    let tile_cache = picture.tile_cache.as_ref().unwrap();

                    // If the tile cache is disabled, just recurse into the
                    // picture like a normal pass-through picture, adding
                    // any child primitives into the parent surface batches.
                    if !tile_cache.is_enabled {
                        self.add_pic_to_batch(
                            picture,
                            task_id,
                            ctx,
                            gpu_cache,
                            render_tasks,
                            deferred_resolves,
                            prim_headers,
                            transforms,
                            root_spatial_node_index,
                            z_generator,
                        );

                        return;
                    }

                    // Construct a local clip rect that ensures we only draw pixels where
                    // the local bounds of the picture extend to within the edge tiles.
                    let local_clip_rect = prim_info
                        .combined_local_clip_rect
                        .intersection(&picture.local_rect)
                        .and_then(|rect| {
                            rect.intersection(&picture.local_clip_rect)
                        });

                    // If the intersection is empty there is nothing visible
                    // to draw from this tile cache.
                    if let Some(local_clip_rect) = local_clip_rect {
                        // Step through each tile in the cache, and draw it with an image
                        // brush primitive if visible.

                        let kind = BatchKind::Brush(
                            BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
                        );

                        for tile_index in &tile_cache.tiles_to_draw {
                            let tile = &tile_cache.tiles[tile_index.0];

                            // Get the local rect of the tile.
                            let tile_rect = tile.local_rect;

                            let prim_header = PrimitiveHeader {
                                local_rect: tile_rect,
                                local_clip_rect,
                                task_address,
                                specific_prim_address: prim_cache_address,
                                clip_task_address,
                                transform_id,
                            };

                            let prim_header_index = prim_headers.push(&prim_header, z_id, [
                                ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
                                RasterizationSpace::Local as i32,
                                get_shader_opacity(1.0),
                            ]);

                            let cache_item = ctx
                                .resource_cache
                                .get_texture_cache_item(&tile.handle);

                            // Cached tiles are opaque copies, so no blending
                            // is needed when compositing them.
                            let key = BatchKey::new(
                                kind,
                                BlendMode::None,
                                BatchTextures::color(cache_item.texture_id),
                            );

                            let uv_rect_address = gpu_cache
                                .get_address(&cache_item.uv_rect_handle)
                                .as_int();

                            let instance = BrushInstance {
                                prim_header_index,
                                clip_task_address,
                                segment_index: INVALID_SEGMENT_INDEX,
                                edge_flags: EdgeAaSegmentMask::empty(),
                                brush_flags,
                                user_data: uv_rect_address,
                            };

                            // Instead of retrieving the batch once and adding each tile instance,
                            // use this API to get an appropriate batch for each tile, since
                            // the batch textures may be different. The batch list internally
                            // caches the current batch if the key hasn't changed.
                            let batch = self.current_batch_list().set_params_and_get_batch(
                                key,
                                bounding_rect,
                                z_id,
                            );

                            batch.push(PrimitiveInstanceData::from(instance));
                        }

                        // If there is a dirty rect for the tile cache, recurse into the
                        // main picture primitive list, and draw them first.
                        if !tile_cache.dirty_region.is_empty() {
                            let mut tile_blits = Vec::new();

                            let (target_rect, _) = render_tasks[task_id].get_target_rect();

                            // Offset pending blit sources by this task's
                            // target-rect origin so they read from the
                            // correct location in the render target.
                            for blit in &tile_cache.pending_blits {
                                tile_blits.push(TileBlit {
                                    dest_offset: blit.dest_offset,
                                    size: blit.size,
                                    target: blit.target.clone(),
                                    src_offset: DeviceIntPoint::new(
                                        blit.src_offset.x + target_rect.origin.x,
                                        blit.src_offset.y + target_rect.origin.y,
                                    ),
                                })
                            }

                            // Collect the list of regions to scissor and repeat
                            // the draw calls into, based on dirty rects.
                            let batch_regions = tile_cache
                                .dirty_region
                                .dirty_rects
                                .iter()
                                .map(|dirty_rect| {
                                    (dirty_rect.world_rect * ctx.device_pixel_scale).round().to_i32()
                                })
                                .collect();

                            // Batch the picture's own content into a scoped
                            // batch list covering only the dirty regions...
                            self.push_new_batch_list(
                                batch_regions,
                                tile_blits,
                            );

                            self.add_pic_to_batch(
                                picture,
                                task_id,
                                ctx,
                                gpu_cache,
                                render_tasks,
                                deferred_resolves,
                                prim_headers,
                                transforms,
                                root_spatial_node_index,
                                z_generator,
                            );

                            // ...then restore an unscoped batch list for
                            // subsequent primitives.
                            self.push_new_batch_list(
                                Vec::new(),
                                Vec::new(),
                            );
                        }
                    }
                }
                PictureCompositeMode::Filter(filter) => {
                    let surface = ctx.surfaces[raster_config.surface_index.0]
                        .surface
                        .as_ref()
                        .expect("bug: surface must be allocated by now");
                    // Invisible filters should have been culled earlier.
                    assert!(filter.is_visible());
                    match filter {
                        FilterOp::Blur(..) => {
                            // The blur has already been applied when rendering
                            // the surface; composite it as a plain image.
                            let kind = BatchKind::Brush(
                                BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
                            );
                            let (uv_rect_address, textures) = surface
                                .resolve(
                                    render_tasks,
                                    ctx.resource_cache,
                                    gpu_cache,
                                );
                            let key = BatchKey::new(
                                kind,
                                non_segmented_blend_mode,
                                textures,
                            );
                            let prim_header_index = prim_headers.push(&prim_header, z_id, [
                                ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
                                RasterizationSpace::Screen as i32,
                                get_shader_opacity(1.0),
                            ]);

                            let instance = BrushInstance {
                                prim_header_index,
                                segment_index: INVALID_SEGMENT_INDEX,
                                edge_flags: EdgeAaSegmentMask::empty(),
                                brush_flags,
                                clip_task_address,
                                user_data: uv_rect_address.as_int(),
                            };

                            self.current_batch_list().push_single_instance(
                                key,
                                bounding_rect,
                                z_id,
                                PrimitiveInstanceData::from(instance),
                            );
                        }
                        FilterOp::DropShadow(offset, ..) => {
                            // Draw an instance of the shadow first, following by the content.

                            // Both the shadow and the content get drawn as a brush image.
                            let kind = BatchKind::Brush(
                                BrushBatchKind::Image(ImageBufferKind::Texture2DArray),
                            );

                            // Gets the saved render task ID of the content, which is
                            // deeper in the render task tree than the direct child.
                            let secondary_id = picture.secondary_render_task_id.expect("no secondary!?");
                            let saved_index = render_tasks[secondary_id].saved_index.expect("no saved index!?");
                            debug_assert_ne!(saved_index, SavedTargetIndex::PENDING);

                            // Build BatchTextures for shadow/content
                            let shadow_textures = BatchTextures::render_target_cache();
                            let content_textures = BatchTextures {
                                colors: [
                                    TextureSource::RenderTaskCache(saved_index),
                                    TextureSource::Invalid,
                                    TextureSource::Invalid,
                                ],
                            };

                            // Build batch keys for shadow/content
                            let shadow_key = BatchKey::new(kind, non_segmented_blend_mode, shadow_textures);
                            let content_key = BatchKey::new(kind, non_segmented_blend_mode, content_textures);

                            // Retrieve the UV rect addresses for shadow/content.
                            let cache_task_id = surface.resolve_render_task_id();
                            let shadow_uv_rect_address = render_tasks[cache_task_id]
                                .get_texture_address(gpu_cache)
                                .as_int();
                            let content_uv_rect_address = render_tasks[secondary_id]
                                .get_texture_address(gpu_cache)
                                .as_int();

                            // Get the GPU cache address of the extra data handle.
                            let shadow_prim_address = gpu_cache.get_address(&picture.extra_gpu_data_handle);

                            // Shadow keeps this primitive's z; content gets a
                            // fresh (higher) z so it draws on top.
                            let z_id_shadow = z_id;
                            let z_id_content = z_generator.next();

                            let content_prim_header_index = prim_headers.push(&prim_header, z_id_content, [
                                ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
                                RasterizationSpace::Screen as i32,
                                get_shader_opacity(1.0),
                            ]);

                            // The shadow is the content rect translated by the
                            // drop-shadow offset.
                            let shadow_rect = prim_header.local_rect.translate(&offset);

                            let shadow_prim_header = PrimitiveHeader {
                                local_rect: shadow_rect,
                                specific_prim_address: shadow_prim_address,
                                ..prim_header
                            };

                            let shadow_prim_header_index = prim_headers.push(&shadow_prim_header, z_id_shadow, [
                                ShaderColorMode::Alpha as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
                                RasterizationSpace::Screen as i32,
                                get_shader_opacity(1.0),
                            ]);

                            let shadow_instance = BrushInstance {
                                prim_header_index: shadow_prim_header_index,
                                clip_task_address,
                                segment_index: INVALID_SEGMENT_INDEX,
                                edge_flags: EdgeAaSegmentMask::empty(),
                                brush_flags,
                                user_data: shadow_uv_rect_address,
                            };

                            let content_instance = BrushInstance {
                                prim_header_index: content_prim_header_index,
                                clip_task_address,
                                segment_index: INVALID_SEGMENT_INDEX,
                                edge_flags: EdgeAaSegmentMask::empty(),
                                brush_flags,
                                user_data: content_uv_rect_address,
                            };

                            // Push shadow first, then content (back to front).
                            self.current_batch_list().push_single_instance(
                                shadow_key,
                                bounding_rect,
                                z_id_shadow,
                                PrimitiveInstanceData::from(shadow_instance),
                            );

                            self.current_batch_list().push_single_instance(
                                content_key,
                                bounding_rect,
                                z_id_content,
                                PrimitiveInstanceData::from(content_instance),
                            );
                        }
                        _ => {
                            // All remaining filters are applied in the Blend
                            // brush shader at composite time; encode the
                            // filter kind and its parameter for the shader.
                            let filter_mode = match filter {
                                FilterOp::Identity => 1, // matches `Contrast(1)`
                                FilterOp::Blur(..) => 0,
                                FilterOp::Contrast(..) => 1,
                                FilterOp::Grayscale(..) => 2,
                                FilterOp::HueRotate(..) => 3,
                                FilterOp::Invert(..) => 4,
                                FilterOp::Saturate(..) => 5,
                                FilterOp::Sepia(..) => 6,
                                FilterOp::Brightness(..) => 7,
                                FilterOp::Opacity(..) => 8,
                                FilterOp::DropShadow(..) => 9,
                                FilterOp::ColorMatrix(..) => 10,
                                FilterOp::SrgbToLinear => 11,
                                FilterOp::LinearToSrgb => 12,
                            };

                            // Filter amounts are passed as 16.16 fixed point.
                            let user_data = match filter {
                                FilterOp::Identity => 0x10000i32, // matches `Contrast(1)`
                                FilterOp::Contrast(amount) |
                                FilterOp::Grayscale(amount) |
                                FilterOp::Invert(amount) |
                                FilterOp::Saturate(amount) |
                                FilterOp::Sepia(amount) |
                                FilterOp::Brightness(amount) |
                                FilterOp::Opacity(_, amount) => {
                                    (amount * 65536.0) as i32
                                }
                                FilterOp::SrgbToLinear | FilterOp::LinearToSrgb => 0,
                                FilterOp::HueRotate(angle) => {
                                    // Degrees -> radians (pi/180), then 16.16 fixed point.
                                    (0.01745329251 * angle * 65536.0) as i32
                                }
                                // Go through different paths
                                FilterOp::Blur(..) |
                                FilterOp::DropShadow(..) => {
                                    unreachable!();
                                }
                                FilterOp::ColorMatrix(_) => {
                                    picture.extra_gpu_data_handle.as_int(gpu_cache)
                                }
                            };

                            let (uv_rect_address, textures) = surface
                                .resolve(
                                    render_tasks,
                                    ctx.resource_cache,
                                    gpu_cache,
                                );

                            let key = BatchKey::new(
                                BatchKind::Brush(BrushBatchKind::Blend),
                                BlendMode::PremultipliedAlpha,
                                textures,
                            );

                            let prim_header_index = prim_headers.push(&prim_header, z_id, [
                                uv_rect_address.as_int(),
                                filter_mode,
                                user_data,
                            ]);

                            let instance = BrushInstance {
                                prim_header_index,
                                clip_task_address,
                                segment_index: INVALID_SEGMENT_INDEX,
                                edge_flags: EdgeAaSegmentMask::empty(),
                                brush_flags,
                                user_data: 0,
                            };

                            self.current_batch_list().push_single_instance(
                                key,
                                bounding_rect,
                                z_id,
                                PrimitiveInstanceData::from(instance),
                            );
                        }
                    }
                }
                PictureCompositeMode::MixBlend(mode) => {
                    // Mix-blend needs both the picture's surface (source)
                    // and the saved backdrop task to blend against.
                    let surface = ctx.surfaces[raster_config.surface_index.0]
                        .surface
                        .as_ref()
                        .expect("bug: surface must be allocated by now");
                    let cache_task_id = surface.resolve_render_task_id();
                    let backdrop_id = picture.secondary_render_task_id.expect("no backdrop!?");

                    let key = BatchKey::new(
                        BatchKind::Brush(
                            BrushBatchKind::MixBlend {
                                task_id,
                                source_id: cache_task_id,
                                backdrop_id,
                            },
                        ),
                        BlendMode::PremultipliedAlpha,
                        BatchTextures::no_texture(),
                    );
                    let backdrop_task_address = render_tasks.get_task_address(backdrop_id);
                    let source_task_address = render_tasks.get_task_address(cache_task_id);
                    let prim_header_index = prim_headers.push(&prim_header, z_id, [
                        mode as u32 as i32,
                        backdrop_task_address.0 as i32,
                        source_task_address.0 as i32,
                    ]);

                    let instance = BrushInstance {
                        prim_header_index,
                        clip_task_address,
                        segment_index: INVALID_SEGMENT_INDEX,
                        edge_flags: EdgeAaSegmentMask::empty(),
                        brush_flags,
                        user_data: 0,
                    };

                    self.current_batch_list().push_single_instance(
                        key,
                        bounding_rect,
                        z_id,
                        PrimitiveInstanceData::from(instance),
                    );
                }
                PictureCompositeMode::Blit(_) => {
                    // Straight copy of the picture's surface as an image.
                    let surface = ctx.surfaces[raster_config.surface_index.0]
                        .surface
                        .as_ref()
                        .expect("bug: surface must be allocated by now");
                    let cache_task_id = surface.resolve_render_task_id();
                    let kind = BatchKind::Brush(
                        BrushBatchKind::Image(ImageBufferKind::Texture2DArray)
                    );
                    let key = BatchKey::new(
                        kind,
                        non_segmented_blend_mode,
                        BatchTextures::render_target_cache(),
                    );

                    let uv_rect_address = render_tasks[cache_task_id]
                        .get_texture_address(gpu_cache)
                        .as_int();
                    let prim_header_index = prim_headers.push(&prim_header, z_id, [
                        ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
                        RasterizationSpace::Screen as i32,
                        get_shader_opacity(1.0),
                    ]);

                    let instance = BrushInstance {
                        prim_header_index,
                        clip_task_address,
                        segment_index: INVALID_SEGMENT_INDEX,
                        edge_flags: EdgeAaSegmentMask::empty(),
                        brush_flags,
                        user_data: uv_rect_address,
                    };

                    self.current_batch_list().push_single_instance(
                        key,
                        bounding_rect,
                        z_id,
                        PrimitiveInstanceData::from(instance),
                    );
                }
            }
        }
        None => {
            // If this picture is being drawn into an existing target (i.e. with
            // no composition operation), recurse and add to the current batch list.
            self.add_pic_to_batch(
                picture,
                task_id,
                ctx,
                gpu_cache,
                render_tasks,
                deferred_resolves,
                prim_headers,
                transforms,
                root_spatial_node_index,
                z_generator,
            );
        }
    }
}
PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
    // Batch an image border: resolve the source image, then submit the
    // border's pre-built brush segments as image-brush instances.
    let prim_data = &ctx.data_stores.image_border[data_handle];
    let common_data = &prim_data.common;
    let border_data = &prim_data.kind;

    let cache_item = resolve_image(
        border_data.request,
        ctx.resource_cache,
        gpu_cache,
        deferred_resolves,
    );
    // The image isn't available this frame; skip the primitive.
    if cache_item.texture_id == TextureSource::Invalid {
        return;
    }

    let textures = BatchTextures::color(cache_item.texture_id);
    let prim_cache_address = gpu_cache.get_address(&common_data.gpu_cache_handle);
    let specified_blend_mode = BlendMode::PremultipliedAlpha;
    // Opaque, unclipped, axis-aligned primitives can skip blending
    // entirely; everything else composites with premultiplied alpha.
    let non_segmented_blend_mode = if !common_data.opacity.is_opaque ||
        prim_info.clip_task_index != ClipTaskIndex::INVALID ||
        transform_kind == TransformedRectKind::Complex
    {
        specified_blend_mode
    } else {
        BlendMode::None
    };

    let prim_header = PrimitiveHeader {
        local_rect: prim_rect,
        local_clip_rect: prim_info.combined_local_clip_rect,
        task_address,
        specific_prim_address: prim_cache_address,
        clip_task_address,
        transform_id,
    };

    let batch_params = BrushBatchParameters::shared(
        BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
        textures,
        [
            ShaderColorMode::Image as i32 | ((AlphaType::PremultipliedAlpha as i32) << 16),
            RasterizationSpace::Local as i32,
            get_shader_opacity(1.0),
        ],
        cache_item.uv_rect_handle.as_int(gpu_cache),
    );

    let prim_header_index = prim_headers.push(
        &prim_header,
        z_id,
        batch_params.prim_user_data,
    );

    // Borders always have segments (the nine-patch pieces).
    self.add_segmented_prim_to_batch(
        Some(border_data.brush_segments.as_slice()),
        common_data.opacity,
        &batch_params,
        specified_blend_mode,
        non_segmented_blend_mode,
        prim_header_index,
        clip_task_address,
        bounding_rect,
        transform_kind,
        render_tasks,
        z_id,
        prim_info.clip_task_index,
        ctx,
    );
}
PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, opacity_binding_index, .. } => {
    // Batch a solid rectangle, combining its static opacity with any
    // animated opacity binding.
    let prim_data = &ctx.data_stores.prim[data_handle];
    let specified_blend_mode = BlendMode::PremultipliedAlpha;
    let opacity_binding = ctx.prim_store.get_opacity_binding(opacity_binding_index);

    // Effective opacity = bound (animated) opacity combined with the
    // primitive's own opacity.
    let opacity = PrimitiveOpacity::from_alpha(opacity_binding);
    let opacity = opacity.combine(prim_data.opacity);

    // Opaque, unclipped, axis-aligned primitives can skip blending.
    let non_segmented_blend_mode = if !opacity.is_opaque ||
        prim_info.clip_task_index != ClipTaskIndex::INVALID ||
        transform_kind == TransformedRectKind::Complex
    {
        specified_blend_mode
    } else {
        BlendMode::None
    };

    let batch_params = BrushBatchParameters::shared(
        BrushBatchKind::Solid,
        BatchTextures::no_texture(),
        [get_shader_opacity(opacity_binding), 0, 0],
        0,
    );

    // UNUSED means the primitive was not segmented: use the shared GPU
    // data; otherwise fetch the per-segment GPU data and segment list.
    let (prim_cache_address, segments) = if segment_instance_index == SegmentInstanceIndex::UNUSED {
        (gpu_cache.get_address(&prim_data.gpu_cache_handle), None)
    } else {
        let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
        let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
        (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
    };

    let prim_header = PrimitiveHeader {
        local_rect: prim_rect,
        local_clip_rect: prim_info.combined_local_clip_rect,
        task_address,
        specific_prim_address: prim_cache_address,
        clip_task_address,
        transform_id,
    };

    let prim_header_index = prim_headers.push(
        &prim_header,
        z_id,
        batch_params.prim_user_data,
    );

    self.add_segmented_prim_to_batch(
        segments,
        opacity,
        &batch_params,
        specified_blend_mode,
        non_segmented_blend_mode,
        prim_header_index,
        clip_task_address,
        bounding_rect,
        transform_kind,
        render_tasks,
        z_id,
        prim_info.clip_task_index,
        ctx,
    );
}
PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, .. } => {
    // Batch a YUV image: resolve each plane's texture and UV rect, then
    // submit with a YuvImage brush batch keyed on format/depth/space.
    let yuv_image_data = &ctx.data_stores.yuv_image[data_handle].kind;
    let mut textures = BatchTextures::no_texture();
    let mut uv_rect_addresses = [0; 3];

    //yuv channel
    let channel_count = yuv_image_data.format.get_plane_num();
    debug_assert!(channel_count <= 3);
    for channel in 0 .. channel_count {
        let image_key = yuv_image_data.yuv_key[channel];

        let cache_item = resolve_image(
            ImageRequest {
                key: image_key,
                rendering: yuv_image_data.image_rendering,
                tile: None,
            },
            ctx.resource_cache,
            gpu_cache,
            deferred_resolves,
        );

        // If any plane is unavailable, skip the whole primitive this
        // frame rather than drawing with partial planes.
        if cache_item.texture_id == TextureSource::Invalid {
            warn!("Warnings: skip a PrimitiveKind::YuvImage");
            return;
        }

        textures.colors[channel] = cache_item.texture_id;
        uv_rect_addresses[channel] = cache_item.uv_rect_handle.as_int(gpu_cache);
    }

    // All yuv textures should be the same type.
    let buffer_kind = get_buffer_kind(textures.colors[0]);
    assert!(
        textures.colors[1 .. yuv_image_data.format.get_plane_num()]
            .iter()
            .all(|&tid| buffer_kind == get_buffer_kind(tid))
    );

    let kind = BrushBatchKind::YuvImage(
        buffer_kind,
        yuv_image_data.format,
        yuv_image_data.color_depth,
        yuv_image_data.color_space,
    );

    let batch_params = BrushBatchParameters::shared(
        kind,
        textures,
        [
            uv_rect_addresses[0],
            uv_rect_addresses[1],
            uv_rect_addresses[2],
        ],
        0,
    );

    let specified_blend_mode = BlendMode::PremultipliedAlpha;

    // Opaque, unclipped, axis-aligned primitives can skip blending.
    let non_segmented_blend_mode = if !prim_common_data.opacity.is_opaque ||
        prim_info.clip_task_index != ClipTaskIndex::INVALID ||
        transform_kind == TransformedRectKind::Complex
    {
        specified_blend_mode
    } else {
        BlendMode::None
    };

    debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID);
    // UNUSED means no segmentation: use the shared GPU data; otherwise
    // fetch the per-segment GPU data and segment list.
    let (prim_cache_address, segments) = if segment_instance_index == SegmentInstanceIndex::UNUSED {
        (gpu_cache.get_address(&prim_common_data.gpu_cache_handle), None)
    } else {
        let segment_instance = &ctx.scratch.segment_instances[segment_instance_index];
        let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
        (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
    };

    let prim_header = PrimitiveHeader {
        local_rect: prim_rect,
        local_clip_rect: prim_info.combined_local_clip_rect,
        task_address,
        specific_prim_address: prim_cache_address,
        clip_task_address,
        transform_id,
    };

    let prim_header_index = prim_headers.push(
        &prim_header,
        z_id,
        batch_params.prim_user_data,
    );

    self.add_segmented_prim_to_batch(
        segments,
        prim_common_data.opacity,
        &batch_params,
        specified_blend_mode,
        non_segmented_blend_mode,
        prim_header_index,
        clip_task_address,
        bounding_rect,
        transform_kind,
        render_tasks,
        z_id,
        prim_info.clip_task_index,
        ctx,
    );
}
PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
    // Batch an image primitive. Two paths: a whole (non-tiled) image is
    // submitted as a (possibly segmented) brush; a tiled image is split
    // into chunks of per-tile segments sized to the GPU cache row limit.
    let image_data = &ctx.data_stores.image[data_handle].kind;
    let common_data = &ctx.data_stores.image[data_handle].common;
    let image_instance = &ctx.prim_store.images[image_instance_index];
    let opacity_binding = ctx.prim_store.get_opacity_binding(image_instance.opacity_binding_index);
    let specified_blend_mode = match image_data.alpha_type {
        AlphaType::PremultipliedAlpha => BlendMode::PremultipliedAlpha,
        AlphaType::Alpha => BlendMode::Alpha,
    };
    let request = ImageRequest {
        key: image_data.key,
        rendering: image_data.image_rendering,
        tile: None,
    };
    let prim_user_data = [
        ShaderColorMode::Image as i32 | ((image_data.alpha_type as i32) << 16),
        RasterizationSpace::Local as i32,
        get_shader_opacity(opacity_binding),
    ];

    if image_instance.visible_tiles.is_empty() {
        // Non-tiled path: resolve either the plain image or a cached
        // render task (e.g. for pre-processed images).
        let cache_item = match image_data.source {
            ImageSource::Default => {
                resolve_image(
                    request,
                    ctx.resource_cache,
                    gpu_cache,
                    deferred_resolves,
                )
            }
            ImageSource::Cache { ref handle, .. } => {
                let rt_handle = handle
                    .as_ref()
                    .expect("bug: render task handle not allocated");
                let rt_cache_entry = ctx.resource_cache
                    .get_cached_render_task(rt_handle);
                ctx.resource_cache.get_texture_cache_item(&rt_cache_entry.handle)
            }
        };

        // Texture not available this frame; skip the primitive.
        if cache_item.texture_id == TextureSource::Invalid {
            return;
        }

        let textures = BatchTextures::color(cache_item.texture_id);

        // Effective opacity = bound (animated) opacity combined with
        // the primitive's own opacity.
        let opacity = PrimitiveOpacity::from_alpha(opacity_binding);
        let opacity = opacity.combine(common_data.opacity);

        // Opaque, unclipped, axis-aligned primitives can skip blending.
        let non_segmented_blend_mode = if !opacity.is_opaque ||
            prim_info.clip_task_index != ClipTaskIndex::INVALID ||
            transform_kind == TransformedRectKind::Complex
        {
            specified_blend_mode
        } else {
            BlendMode::None
        };

        let batch_params = BrushBatchParameters::shared(
            BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
            textures,
            prim_user_data,
            cache_item.uv_rect_handle.as_int(gpu_cache),
        );

        debug_assert_ne!(image_instance.segment_instance_index, SegmentInstanceIndex::INVALID);
        // UNUSED means no segmentation: use the shared GPU data;
        // otherwise fetch per-segment GPU data and the segment list.
        let (prim_cache_address, segments) = if image_instance.segment_instance_index == SegmentInstanceIndex::UNUSED {
            (gpu_cache.get_address(&common_data.gpu_cache_handle), None)
        } else {
            let segment_instance = &ctx.scratch.segment_instances[image_instance.segment_instance_index];
            let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]);
            (gpu_cache.get_address(&segment_instance.gpu_cache_handle), segments)
        };

        let prim_header = PrimitiveHeader {
            local_rect: prim_rect,
            local_clip_rect: prim_info.combined_local_clip_rect,
            task_address,
            specific_prim_address: prim_cache_address,
            clip_task_address,
            transform_id,
        };

        let prim_header_index = prim_headers.push(
            &prim_header,
            z_id,
            batch_params.prim_user_data,
        );

        self.add_segmented_prim_to_batch(
            segments,
            opacity,
            &batch_params,
            specified_blend_mode,
            non_segmented_blend_mode,
            prim_header_index,
            clip_task_address,
            bounding_rect,
            transform_kind,
            render_tasks,
            z_id,
            prim_info.clip_task_index,
            ctx,
        );
    } else {
        // Tiled path: each visible tile becomes a segment. A header's
        // GPU data row is limited, so tiles are processed in chunks.
        const VECS_PER_SPECIFIC_BRUSH: usize = 3;
        const VECS_PER_SEGMENT: usize = 2;
        let max_tiles_per_header = (MAX_VERTEX_TEXTURE_WIDTH - VECS_PER_SPECIFIC_BRUSH) / VECS_PER_SEGMENT;

        // use temporary block storage since we don't know the number of visible tiles beforehand
        let mut gpu_blocks = Vec::<GpuBlockData>::new();
        for chunk in image_instance.visible_tiles.chunks(max_tiles_per_header) {
            gpu_blocks.clear();
            gpu_blocks.push(PremultipliedColorF::WHITE.into()); //color
            gpu_blocks.push(PremultipliedColorF::WHITE.into()); //bg color
            gpu_blocks.push([-1.0, 0.0, 0.0, 0.0].into()); //stretch size
            // negative first value makes the shader code ignore it and use the local size instead
            for tile in chunk {
                // Tile rects are stored relative to the primitive origin.
                let tile_rect = tile.local_rect.translate(&-prim_rect.origin.to_vector());
                gpu_blocks.push(tile_rect.into());
                gpu_blocks.push(GpuBlockData::EMPTY);
            }

            let gpu_handle = gpu_cache.push_per_frame_blocks(&gpu_blocks);
            let prim_header = PrimitiveHeader {
                local_rect: prim_rect,
                local_clip_rect: image_instance.tight_local_clip_rect,
                task_address,
                specific_prim_address: gpu_cache.get_address(&gpu_handle),
                clip_task_address,
                transform_id,
            };
            let prim_header_index = prim_headers.push(&prim_header, z_id, prim_user_data);

            for (i, tile) in chunk.iter().enumerate() {
                // Tiles whose texture can't be resolved are skipped
                // individually (None from get_image_tile_params).
                if let Some((batch_kind, textures, uv_rect_address)) = get_image_tile_params(
                    ctx.resource_cache,
                    gpu_cache,
                    deferred_resolves,
                    request.with_tile(tile.tile_offset),
                ) {
                    let base_instance = BrushInstance {
                        prim_header_index,
                        clip_task_address,
                        segment_index: i as i32,
                        edge_flags: tile.edge_flags,
                        brush_flags: BrushFlags::SEGMENT_RELATIVE | BrushFlags::PERSPECTIVE_INTERPOLATION,
                        user_data: uv_rect_address.as_int(),
                    };
                    let batch_key = BatchKey {
                        blend_mode: specified_blend_mode,
                        kind: BatchKind::Brush(batch_kind),
                        textures,
                    };
                    self.current_batch_list().push_single_instance(
                        batch_key,
                        bounding_rect,
                        z_id,
                        base_instance.into(),
                    );
                }
            }
        }
    }
}
PrimitiveInstanceKind::LinearGradient { data_handle, ref visible_tiles_range, .. } => {
    // Batch a linear gradient: a single (possibly segmented) brush when
    // not tiled, or one instance per visible repeat tile otherwise.
    let prim_data = &ctx.data_stores.linear_grad[data_handle];
    let specified_blend_mode = BlendMode::PremultipliedAlpha;

    // The specific prim address is filled in by whichever path runs.
    let mut prim_header = PrimitiveHeader {
        local_rect: prim_rect,
        local_clip_rect: prim_info.combined_local_clip_rect,
        task_address,
        specific_prim_address: GpuCacheAddress::invalid(),
        clip_task_address,
        transform_id,
    };

    if visible_tiles_range.is_empty() {
        // Opaque, unclipped, axis-aligned primitives can skip blending.
        let non_segmented_blend_mode = if !prim_data.opacity.is_opaque ||
            prim_info.clip_task_index != ClipTaskIndex::INVALID ||
            transform_kind == TransformedRectKind::Complex
        {
            specified_blend_mode
        } else {
            BlendMode::None
        };

        let batch_params = BrushBatchParameters::shared(
            BrushBatchKind::LinearGradient,
            BatchTextures::no_texture(),
            [
                prim_data.stops_handle.as_int(gpu_cache),
                0,
                0,
            ],
            0,
        );

        prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);

        let prim_header_index = prim_headers.push(
            &prim_header,
            z_id,
            batch_params.prim_user_data,
        );

        let segments = if prim_data.brush_segments.is_empty() {
            None
        } else {
            Some(prim_data.brush_segments.as_slice())
        };

        self.add_segmented_prim_to_batch(
            segments,
            prim_data.opacity,
            &batch_params,
            specified_blend_mode,
            non_segmented_blend_mode,
            prim_header_index,
            clip_task_address,
            bounding_rect,
            transform_kind,
            render_tasks,
            z_id,
            prim_info.clip_task_index,
            ctx,
        );
    } else {
        // Tiled (repeating) gradient: emit one instance per visible tile.
        let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range];

        add_gradient_tiles(
            visible_tiles,
            &prim_data.stops_handle,
            BrushBatchKind::LinearGradient,
            specified_blend_mode,
            bounding_rect,
            clip_task_address,
            gpu_cache,
            self.current_batch_list(),
            &prim_header,
            prim_headers,
            z_id,
        );
    }
}
2043 PrimitiveInstanceKind::RadialGradient { data_handle, ref visible_tiles_range, .. } => {
2044 let prim_data = &ctx.data_stores.radial_grad[data_handle];
2045 let specified_blend_mode = BlendMode::PremultipliedAlpha;
2046
2047 let mut prim_header = PrimitiveHeader {
2048 local_rect: prim_rect,
2049 local_clip_rect: prim_info.combined_local_clip_rect,
2050 task_address,
2051 specific_prim_address: GpuCacheAddress::invalid(),
2052 clip_task_address,
2053 transform_id,
2054 };
2055
2056 if visible_tiles_range.is_empty() {
2057 let non_segmented_blend_mode = if !prim_data.opacity.is_opaque ||
2058 prim_info.clip_task_index != ClipTaskIndex::INVALID ||
2059 transform_kind == TransformedRectKind::Complex
2060 {
2061 specified_blend_mode
2062 } else {
2063 BlendMode::None
2064 };
2065
2066 let batch_params = BrushBatchParameters::shared(
2067 BrushBatchKind::RadialGradient,
2068 BatchTextures::no_texture(),
2069 [
2070 prim_data.stops_handle.as_int(gpu_cache),
2071 0,
2072 0,
2073 ],
2074 0,
2075 );
2076
2077 prim_header.specific_prim_address = gpu_cache.get_address(&prim_data.gpu_cache_handle);
2078
2079 let prim_header_index = prim_headers.push(
2080 &prim_header,
2081 z_id,
2082 batch_params.prim_user_data,
2083 );
2084
2085 let segments = if prim_data.brush_segments.is_empty() {
2086 None
2087 } else {
2088 Some(prim_data.brush_segments.as_slice())
2089 };
2090
2091 self.add_segmented_prim_to_batch(
2092 segments,
2093 prim_data.opacity,
2094 &batch_params,
2095 specified_blend_mode,
2096 non_segmented_blend_mode,
2097 prim_header_index,
2098 clip_task_address,
2099 bounding_rect,
2100 transform_kind,
2101 render_tasks,
2102 z_id,
2103 prim_info.clip_task_index,
2104 ctx,
2105 );
2106 } else {
2107 let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range];
2108
2109 add_gradient_tiles(
2110 visible_tiles,
2111 &prim_data.stops_handle,
2112 BrushBatchKind::RadialGradient,
2113 specified_blend_mode,
2114 bounding_rect,
2115 clip_task_address,
2116 gpu_cache,
2117 self.current_batch_list(),
2118 &prim_header,
2119 prim_headers,
2120 z_id,
2121 );
2122 }
2123 }
2124 }
2125 }
2126
2127 /// Add a single segment instance to a batch.
add_segment_to_batch( &mut self, segment: &BrushSegment, segment_data: &SegmentInstanceData, segment_index: i32, batch_kind: BrushBatchKind, prim_header_index: PrimitiveHeaderIndex, alpha_blend_mode: BlendMode, bounding_rect: &PictureRect, transform_kind: TransformedRectKind, render_tasks: &RenderTaskTree, z_id: ZBufferId, prim_opacity: PrimitiveOpacity, clip_task_index: ClipTaskIndex, ctx: &RenderTargetContext, )2128 fn add_segment_to_batch(
2129 &mut self,
2130 segment: &BrushSegment,
2131 segment_data: &SegmentInstanceData,
2132 segment_index: i32,
2133 batch_kind: BrushBatchKind,
2134 prim_header_index: PrimitiveHeaderIndex,
2135 alpha_blend_mode: BlendMode,
2136 bounding_rect: &PictureRect,
2137 transform_kind: TransformedRectKind,
2138 render_tasks: &RenderTaskTree,
2139 z_id: ZBufferId,
2140 prim_opacity: PrimitiveOpacity,
2141 clip_task_index: ClipTaskIndex,
2142 ctx: &RenderTargetContext,
2143 ) {
2144 debug_assert!(clip_task_index != ClipTaskIndex::INVALID);
2145
2146 // Get GPU address of clip task for this segment, or None if
2147 // the entire segment is clipped out.
2148 let clip_task_address = match get_clip_task_address(
2149 &ctx.scratch.clip_mask_instances,
2150 clip_task_index,
2151 segment_index,
2152 render_tasks,
2153 ) {
2154 Some(clip_task_address) => clip_task_address,
2155 None => return,
2156 };
2157
2158 // If a got a valid (or OPAQUE) clip task address, add the segment.
2159 let is_inner = segment.edge_flags.is_empty();
2160 let needs_blending = !prim_opacity.is_opaque ||
2161 clip_task_address != OPAQUE_TASK_ADDRESS ||
2162 (!is_inner && transform_kind == TransformedRectKind::Complex);
2163
2164 let instance = PrimitiveInstanceData::from(BrushInstance {
2165 segment_index,
2166 edge_flags: segment.edge_flags,
2167 clip_task_address,
2168 brush_flags: BrushFlags::PERSPECTIVE_INTERPOLATION | segment.brush_flags,
2169 prim_header_index,
2170 user_data: segment_data.user_data,
2171 });
2172
2173 let batch_key = BatchKey {
2174 blend_mode: if needs_blending { alpha_blend_mode } else { BlendMode::None },
2175 kind: BatchKind::Brush(batch_kind),
2176 textures: segment_data.textures,
2177 };
2178
2179 self.current_batch_list().push_single_instance(
2180 batch_key,
2181 bounding_rect,
2182 z_id,
2183 instance,
2184 );
2185 }
2186
2187 /// Add any segment(s) from a brush to batches.
add_segmented_prim_to_batch( &mut self, brush_segments: Option<&[BrushSegment]>, prim_opacity: PrimitiveOpacity, params: &BrushBatchParameters, alpha_blend_mode: BlendMode, non_segmented_blend_mode: BlendMode, prim_header_index: PrimitiveHeaderIndex, clip_task_address: RenderTaskAddress, bounding_rect: &PictureRect, transform_kind: TransformedRectKind, render_tasks: &RenderTaskTree, z_id: ZBufferId, clip_task_index: ClipTaskIndex, ctx: &RenderTargetContext, )2188 fn add_segmented_prim_to_batch(
2189 &mut self,
2190 brush_segments: Option<&[BrushSegment]>,
2191 prim_opacity: PrimitiveOpacity,
2192 params: &BrushBatchParameters,
2193 alpha_blend_mode: BlendMode,
2194 non_segmented_blend_mode: BlendMode,
2195 prim_header_index: PrimitiveHeaderIndex,
2196 clip_task_address: RenderTaskAddress,
2197 bounding_rect: &PictureRect,
2198 transform_kind: TransformedRectKind,
2199 render_tasks: &RenderTaskTree,
2200 z_id: ZBufferId,
2201 clip_task_index: ClipTaskIndex,
2202 ctx: &RenderTargetContext,
2203 ) {
2204 match (brush_segments, ¶ms.segment_data) {
2205 (Some(ref brush_segments), SegmentDataKind::Instanced(ref segment_data)) => {
2206 // In this case, we have both a list of segments, and a list of
2207 // per-segment instance data. Zip them together to build batches.
2208 debug_assert_eq!(brush_segments.len(), segment_data.len());
2209 for (segment_index, (segment, segment_data)) in brush_segments
2210 .iter()
2211 .zip(segment_data.iter())
2212 .enumerate()
2213 {
2214 self.add_segment_to_batch(
2215 segment,
2216 segment_data,
2217 segment_index as i32,
2218 params.batch_kind,
2219 prim_header_index,
2220 alpha_blend_mode,
2221 bounding_rect,
2222 transform_kind,
2223 render_tasks,
2224 z_id,
2225 prim_opacity,
2226 clip_task_index,
2227 ctx,
2228 );
2229 }
2230 }
2231 (Some(ref brush_segments), SegmentDataKind::Shared(ref segment_data)) => {
2232 // A list of segments, but the per-segment data is common
2233 // between all segments.
2234 for (segment_index, segment) in brush_segments
2235 .iter()
2236 .enumerate()
2237 {
2238 self.add_segment_to_batch(
2239 segment,
2240 segment_data,
2241 segment_index as i32,
2242 params.batch_kind,
2243 prim_header_index,
2244 alpha_blend_mode,
2245 bounding_rect,
2246 transform_kind,
2247 render_tasks,
2248 z_id,
2249 prim_opacity,
2250 clip_task_index,
2251 ctx,
2252 );
2253 }
2254 }
2255 (None, SegmentDataKind::Shared(ref segment_data)) => {
2256 // No segments, and thus no per-segment instance data.
2257 // Note: the blend mode already takes opacity into account
2258 let batch_key = BatchKey {
2259 blend_mode: non_segmented_blend_mode,
2260 kind: BatchKind::Brush(params.batch_kind),
2261 textures: segment_data.textures,
2262 };
2263 let instance = PrimitiveInstanceData::from(BrushInstance {
2264 segment_index: INVALID_SEGMENT_INDEX,
2265 edge_flags: EdgeAaSegmentMask::all(),
2266 clip_task_address,
2267 brush_flags: BrushFlags::PERSPECTIVE_INTERPOLATION,
2268 prim_header_index,
2269 user_data: segment_data.user_data,
2270 });
2271 self.current_batch_list().push_single_instance(
2272 batch_key,
2273 bounding_rect,
2274 z_id,
2275 PrimitiveInstanceData::from(instance),
2276 );
2277 }
2278 (None, SegmentDataKind::Instanced(..)) => {
2279 // We should never hit the case where there are no segments,
2280 // but a list of segment instance data.
2281 unreachable!();
2282 }
2283 }
2284 }
2285 }
2286
add_gradient_tiles( visible_tiles: &[VisibleGradientTile], stops_handle: &GpuCacheHandle, kind: BrushBatchKind, blend_mode: BlendMode, bounding_rect: &PictureRect, clip_task_address: RenderTaskAddress, gpu_cache: &GpuCache, batch_list: &mut BatchList, base_prim_header: &PrimitiveHeader, prim_headers: &mut PrimitiveHeaders, z_id: ZBufferId, )2287 fn add_gradient_tiles(
2288 visible_tiles: &[VisibleGradientTile],
2289 stops_handle: &GpuCacheHandle,
2290 kind: BrushBatchKind,
2291 blend_mode: BlendMode,
2292 bounding_rect: &PictureRect,
2293 clip_task_address: RenderTaskAddress,
2294 gpu_cache: &GpuCache,
2295 batch_list: &mut BatchList,
2296 base_prim_header: &PrimitiveHeader,
2297 prim_headers: &mut PrimitiveHeaders,
2298 z_id: ZBufferId,
2299 ) {
2300 let batch = batch_list.set_params_and_get_batch(
2301 BatchKey {
2302 blend_mode: blend_mode,
2303 kind: BatchKind::Brush(kind),
2304 textures: BatchTextures::no_texture(),
2305 },
2306 bounding_rect,
2307 z_id,
2308 );
2309
2310 let user_data = [stops_handle.as_int(gpu_cache), 0, 0];
2311
2312 for tile in visible_tiles {
2313 let prim_header = PrimitiveHeader {
2314 specific_prim_address: gpu_cache.get_address(&tile.handle),
2315 local_rect: tile.local_rect,
2316 local_clip_rect: tile.local_clip_rect,
2317 ..*base_prim_header
2318 };
2319 let prim_header_index = prim_headers.push(&prim_header, z_id, user_data);
2320
2321 batch.push(PrimitiveInstanceData::from(
2322 BrushInstance {
2323 prim_header_index,
2324 clip_task_address,
2325 segment_index: INVALID_SEGMENT_INDEX,
2326 edge_flags: EdgeAaSegmentMask::all(),
2327 brush_flags: BrushFlags::PERSPECTIVE_INTERPOLATION,
2328 user_data: 0,
2329 }
2330 ));
2331 }
2332 }
2333
get_image_tile_params( resource_cache: &ResourceCache, gpu_cache: &mut GpuCache, deferred_resolves: &mut Vec<DeferredResolve>, request: ImageRequest, ) -> Option<(BrushBatchKind, BatchTextures, GpuCacheAddress)>2334 fn get_image_tile_params(
2335 resource_cache: &ResourceCache,
2336 gpu_cache: &mut GpuCache,
2337 deferred_resolves: &mut Vec<DeferredResolve>,
2338 request: ImageRequest,
2339 ) -> Option<(BrushBatchKind, BatchTextures, GpuCacheAddress)> {
2340
2341 let cache_item = resolve_image(
2342 request,
2343 resource_cache,
2344 gpu_cache,
2345 deferred_resolves,
2346 );
2347
2348 if cache_item.texture_id == TextureSource::Invalid {
2349 None
2350 } else {
2351 let textures = BatchTextures::color(cache_item.texture_id);
2352 Some((
2353 BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
2354 textures,
2355 gpu_cache.get_address(&cache_item.uv_rect_handle),
2356 ))
2357 }
2358 }
2359
/// Either a single texture / user data for all segments,
/// or a list of one per segment.
enum SegmentDataKind {
    /// One set of instance data shared by every segment.
    Shared(SegmentInstanceData),
    /// One entry of instance data per segment, in segment order.
    Instanced(SmallVec<[SegmentInstanceData; 8]>),
}
2366
/// The parameters that are specific to a kind of brush,
/// used by the common method to add a brush to batches.
struct BrushBatchParameters {
    /// Which brush batch this primitive is drawn with.
    batch_kind: BrushBatchKind,
    /// Per-primitive user data, pushed alongside the primitive header.
    prim_user_data: [i32; 3],
    /// Texture / user data, either shared or per-segment.
    segment_data: SegmentDataKind,
}
2374
2375 impl BrushBatchParameters {
2376 /// This brush instance has a list of per-segment
2377 /// instance data.
instanced( batch_kind: BrushBatchKind, prim_user_data: [i32; 3], segment_data: SmallVec<[SegmentInstanceData; 8]>, ) -> Self2378 fn instanced(
2379 batch_kind: BrushBatchKind,
2380 prim_user_data: [i32; 3],
2381 segment_data: SmallVec<[SegmentInstanceData; 8]>,
2382 ) -> Self {
2383 BrushBatchParameters {
2384 batch_kind,
2385 prim_user_data,
2386 segment_data: SegmentDataKind::Instanced(segment_data),
2387 }
2388 }
2389
2390 /// This brush instance shares the per-segment data
2391 /// across all segments.
shared( batch_kind: BrushBatchKind, textures: BatchTextures, prim_user_data: [i32; 3], segment_user_data: i32, ) -> Self2392 fn shared(
2393 batch_kind: BrushBatchKind,
2394 textures: BatchTextures,
2395 prim_user_data: [i32; 3],
2396 segment_user_data: i32,
2397 ) -> Self {
2398 BrushBatchParameters {
2399 batch_kind,
2400 prim_user_data,
2401 segment_data: SegmentDataKind::Shared(
2402 SegmentInstanceData {
2403 textures,
2404 user_data: segment_user_data,
2405 }
2406 ),
2407 }
2408 }
2409 }
2410
2411 impl PrimitiveInstance {
is_cacheable( &self, data_stores: &DataStores, resource_cache: &ResourceCache, ) -> bool2412 pub fn is_cacheable(
2413 &self,
2414 data_stores: &DataStores,
2415 resource_cache: &ResourceCache,
2416 ) -> bool {
2417 let image_key = match self.kind {
2418 PrimitiveInstanceKind::Image { data_handle, .. } => {
2419 let image_data = &data_stores.image[data_handle].kind;
2420 image_data.key
2421 }
2422 PrimitiveInstanceKind::YuvImage { data_handle, .. } => {
2423 let yuv_image_data =
2424 &data_stores.yuv_image[data_handle].kind;
2425 yuv_image_data.yuv_key[0]
2426 }
2427 PrimitiveInstanceKind::Picture { .. } |
2428 PrimitiveInstanceKind::TextRun { .. } |
2429 PrimitiveInstanceKind::LineDecoration { .. } |
2430 PrimitiveInstanceKind::NormalBorder { .. } |
2431 PrimitiveInstanceKind::ImageBorder { .. } |
2432 PrimitiveInstanceKind::Rectangle { .. } |
2433 PrimitiveInstanceKind::LinearGradient { .. } |
2434 PrimitiveInstanceKind::RadialGradient { .. } |
2435 PrimitiveInstanceKind::Clear { .. } => {
2436 return true;
2437 }
2438 };
2439 match resource_cache.get_image_properties(image_key) {
2440 Some(ImageProperties { external_image: Some(_), .. }) => {
2441 false
2442 }
2443 _ => true
2444 }
2445 }
2446 }
2447
2448 impl PictureSurface {
2449 // Retrieve the uv rect handle, and texture for a picture surface.
resolve( &self, render_tasks: &RenderTaskTree, resource_cache: &ResourceCache, gpu_cache: &GpuCache, ) -> (GpuCacheAddress, BatchTextures)2450 fn resolve(
2451 &self,
2452 render_tasks: &RenderTaskTree,
2453 resource_cache: &ResourceCache,
2454 gpu_cache: &GpuCache,
2455 ) -> (GpuCacheAddress, BatchTextures) {
2456 match *self {
2457 PictureSurface::TextureCache(ref handle) => {
2458 let rt_cache_entry = resource_cache
2459 .get_cached_render_task(handle);
2460 let cache_item = resource_cache
2461 .get_texture_cache_item(&rt_cache_entry.handle);
2462
2463 (
2464 gpu_cache.get_address(&cache_item.uv_rect_handle),
2465 BatchTextures::color(cache_item.texture_id),
2466 )
2467 }
2468 PictureSurface::RenderTask(task_id) => {
2469 (
2470 render_tasks[task_id].get_texture_address(gpu_cache),
2471 BatchTextures::render_target_cache(),
2472 )
2473 }
2474 }
2475 }
2476
2477 // Retrieve the render task id for a picture surface. Should only
2478 // be used where it's known that this picture surface will never
2479 // be persisted in the texture cache.
resolve_render_task_id(&self) -> RenderTaskId2480 fn resolve_render_task_id(&self) -> RenderTaskId {
2481 match *self {
2482 PictureSurface::TextureCache(..) => {
2483 panic!("BUG: unexpectedly cached render task");
2484 }
2485 PictureSurface::RenderTask(task_id) => {
2486 task_id
2487 }
2488 }
2489 }
2490 }
2491
resolve_image( request: ImageRequest, resource_cache: &ResourceCache, gpu_cache: &mut GpuCache, deferred_resolves: &mut Vec<DeferredResolve>, ) -> CacheItem2492 pub fn resolve_image(
2493 request: ImageRequest,
2494 resource_cache: &ResourceCache,
2495 gpu_cache: &mut GpuCache,
2496 deferred_resolves: &mut Vec<DeferredResolve>,
2497 ) -> CacheItem {
2498 match resource_cache.get_image_properties(request.key) {
2499 Some(image_properties) => {
2500 // Check if an external image that needs to be resolved
2501 // by the render thread.
2502 match image_properties.external_image {
2503 Some(external_image) => {
2504 // This is an external texture - we will add it to
2505 // the deferred resolves list to be patched by
2506 // the render thread...
2507 let cache_handle = gpu_cache.push_deferred_per_frame_blocks(BLOCKS_PER_UV_RECT);
2508 let cache_item = CacheItem {
2509 texture_id: TextureSource::External(external_image),
2510 uv_rect_handle: cache_handle,
2511 uv_rect: DeviceIntRect::new(
2512 DeviceIntPoint::zero(),
2513 image_properties.descriptor.size,
2514 ),
2515 texture_layer: 0,
2516 };
2517
2518 deferred_resolves.push(DeferredResolve {
2519 image_properties,
2520 address: gpu_cache.get_address(&cache_handle),
2521 rendering: request.rendering,
2522 });
2523
2524 cache_item
2525 }
2526 None => {
2527 if let Ok(cache_item) = resource_cache.get_cached_image(request) {
2528 cache_item
2529 } else {
2530 // There is no usable texture entry for the image key. Just return an invalid texture here.
2531 CacheItem::invalid()
2532 }
2533 }
2534 }
2535 }
2536 None => {
2537 CacheItem::invalid()
2538 }
2539 }
2540 }
2541
2542
/// Batcher managing draw calls into the clip mask (in the RT cache).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipBatcher {
    /// Rectangle draws fill up the rectangles with rounded corners.
    pub rectangles: Vec<ClipMaskInstance>,
    /// Image draws apply the image masking.
    pub images: FastHashMap<TextureSource, Vec<ClipMaskInstance>>,
    /// Box-shadow draws, keyed by the texture that holds the cached
    /// box-shadow render task output.
    pub box_shadows: FastHashMap<TextureSource, Vec<ClipMaskInstance>>,
}
2554
impl ClipBatcher {
    /// Construct an empty batcher with no pending clip instances.
    pub fn new() -> Self {
        ClipBatcher {
            rectangles: Vec::new(),
            images: FastHashMap::default(),
            box_shadows: FastHashMap::default(),
        }
    }

    /// Push a single rectangle clip instance for the given task.
    ///
    /// Both transform ids are identity and no resource is attached, so
    /// this is only suitable for clips expressed directly in the clip
    /// task's own space.
    pub fn add_clip_region(
        &mut self,
        task_address: RenderTaskAddress,
        clip_data_address: GpuCacheAddress,
        local_pos: LayoutPoint,
        sub_rect: DeviceRect,
    ) {
        let instance = ClipMaskInstance {
            render_task_address: task_address,
            clip_transform_id: TransformPaletteId::IDENTITY,
            prim_transform_id: TransformPaletteId::IDENTITY,
            clip_data_address,
            resource_address: GpuCacheAddress::invalid(),
            local_pos,
            tile_rect: LayoutRect::zero(),
            sub_rect,
            snap_offsets: SnapOffsets::empty(),
        };

        self.rectangles.push(instance);
    }

    /// Where appropriate, draw a clip rectangle as a small series of tiles,
    /// instead of one large rectangle.
    ///
    /// Returns true when the clip was emitted as tiles here; false means
    /// the caller should fall back to a single full-size clip instance.
    fn add_tiled_clip_mask(
        &mut self,
        mask_screen_rect: DeviceIntRect,
        clip_rect_size: LayoutSize,
        clip_instance: &ClipNodeInstance,
        clip_scroll_tree: &ClipScrollTree,
        world_rect: &WorldRect,
        device_pixel_scale: DevicePixelScale,
        gpu_address: GpuCacheAddress,
        instance: &ClipMaskInstance,
    ) -> bool {
        // Only try to draw in tiles if the clip mask is big enough.
        if mask_screen_rect.area() < CLIP_RECTANGLE_AREA_THRESHOLD {
            return false;
        }

        let clip_spatial_node = &clip_scroll_tree
            .spatial_nodes[clip_instance.spatial_node_index.0 as usize];

        // Only support clips that are axis-aligned to the root coordinate space,
        // for now, to simplify the logic below. This handles the vast majority
        // of real world cases, but could be expanded in future if needed.
        if clip_spatial_node.coordinate_system_id != CoordinateSystemId::root() {
            return false;
        }

        // Get the world rect of the clip rectangle. If we can't transform it due
        // to the matrix, just fall back to drawing the entire clip mask.
        let local_clip_rect = LayoutRect::new(
            clip_instance.local_pos,
            clip_rect_size,
        );
        let world_clip_rect = match project_rect(
            &clip_spatial_node.world_content_transform.to_transform(),
            &local_clip_rect,
            world_rect,
        ) {
            Some(rect) => rect,
            None => return false,
        };

        // Work out how many tiles to draw this clip mask in, stretched across the
        // device rect of the primitive clip mask.
        let world_device_rect = world_clip_rect * device_pixel_scale;
        let x_tiles = (mask_screen_rect.size.width + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;
        let y_tiles = (mask_screen_rect.size.height + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;

        // Because we only run this code path for axis-aligned rects (the root coord system check above),
        // and only for rectangles (not rounded etc), the world_device_rect is not conservative - we know
        // that there is no inner_rect, and the world_device_rect should be the real, axis-aligned clip rect.
        let mask_origin = mask_screen_rect.origin.to_f32().to_vector();

        for y in 0 .. y_tiles {
            for x in 0 .. x_tiles {
                // Compute this tile's bounds, clamped to the mask rect on
                // the far edges so partial tiles are handled.
                let p0 = DeviceIntPoint::new(
                    x * CLIP_RECTANGLE_TILE_SIZE,
                    y * CLIP_RECTANGLE_TILE_SIZE,
                );
                let p1 = DeviceIntPoint::new(
                    (p0.x + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect.size.width),
                    (p0.y + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect.size.height),
                );
                let normalized_sub_rect = DeviceIntRect::new(
                    p0,
                    DeviceIntSize::new(
                        p1.x - p0.x,
                        p1.y - p0.y,
                    ),
                ).to_f32();
                let world_sub_rect = normalized_sub_rect.translate(&mask_origin);

                // If the clip rect completely contains this tile rect, then drawing
                // these pixels would be redundant - since this clip can't possibly
                // affect the pixels in this tile, skip them!
                if !world_device_rect.contains_rect(&world_sub_rect) {
                    self.rectangles.push(ClipMaskInstance {
                        clip_data_address: gpu_address,
                        sub_rect: normalized_sub_rect,
                        ..*instance
                    });
                }
            }
        }

        true
    }

    /// Batch all clip instances for a range of clip nodes applied to a
    /// single clip mask task, dispatching each node into the matching
    /// list (rectangles, images or box shadows) by its clip item kind.
    pub fn add(
        &mut self,
        task_address: RenderTaskAddress,
        clip_node_range: ClipNodeRange,
        root_spatial_node_index: SpatialNodeIndex,
        resource_cache: &ResourceCache,
        gpu_cache: &GpuCache,
        clip_store: &ClipStore,
        clip_scroll_tree: &ClipScrollTree,
        transforms: &mut TransformPalette,
        clip_data_store: &ClipDataStore,
        actual_rect: DeviceIntRect,
        world_rect: &WorldRect,
        device_pixel_scale: DevicePixelScale,
        snap_offsets: SnapOffsets,
    ) {
        for i in 0 .. clip_node_range.count {
            let clip_instance = clip_store.get_instance_from_range(&clip_node_range, i);
            let clip_node = &clip_data_store[clip_instance.handle];

            // Transform palette ids for the clip node's spatial node and
            // the primitive's root spatial node, both relative to the
            // root of the clip scroll tree.
            let clip_transform_id = transforms.get_id(
                clip_instance.spatial_node_index,
                ROOT_SPATIAL_NODE_INDEX,
                clip_scroll_tree,
            );

            let prim_transform_id = transforms.get_id(
                root_spatial_node_index,
                ROOT_SPATIAL_NODE_INDEX,
                clip_scroll_tree,
            );

            // Template instance covering the whole task rect; each clip
            // item arm below overrides the data / resource addresses
            // (and possibly the sub rect) it needs.
            let instance = ClipMaskInstance {
                render_task_address: task_address,
                clip_transform_id,
                prim_transform_id,
                clip_data_address: GpuCacheAddress::invalid(),
                resource_address: GpuCacheAddress::invalid(),
                local_pos: clip_instance.local_pos,
                tile_rect: LayoutRect::zero(),
                sub_rect: DeviceRect::new(
                    DevicePoint::zero(),
                    actual_rect.size.to_f32(),
                ),
                snap_offsets,
            };

            match clip_node.item {
                ClipItem::Image { image, size, .. } => {
                    let request = ImageRequest {
                        key: image,
                        rendering: ImageRendering::Auto,
                        tile: None,
                    };

                    let clip_data_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);

                    // Emit one image-mask instance for a (possibly tiled)
                    // request; requests with no cached image are skipped
                    // with a warning.
                    let mut add_image = |request: ImageRequest, local_tile_rect: LayoutRect| {
                        let cache_item = match resource_cache.get_cached_image(request) {
                            Ok(item) => item,
                            Err(..) => {
                                warn!("Warnings: skip a image mask");
                                debug!("request: {:?}", request);
                                return;
                            }
                        };
                        self.images
                            .entry(cache_item.texture_id)
                            .or_insert(Vec::new())
                            .push(ClipMaskInstance {
                                clip_data_address,
                                resource_address: gpu_cache.get_address(&cache_item.uv_rect_handle),
                                tile_rect: local_tile_rect,
                                ..instance
                            });
                    };

                    match clip_instance.visible_tiles {
                        Some(ref tiles) => {
                            for tile in tiles {
                                add_image(
                                    request.with_tile(tile.tile_offset),
                                    tile.tile_rect,
                                )
                            }
                        }
                        None => {
                            let mask_rect = LayoutRect::new(clip_instance.local_pos, size);
                            add_image(request, mask_rect)
                        }
                    }
                }
                ClipItem::BoxShadow(ref info) => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    let rt_handle = info
                        .cache_handle
                        .as_ref()
                        .expect("bug: render task handle not allocated");
                    let rt_cache_entry = resource_cache
                        .get_cached_render_task(rt_handle);
                    let cache_item = resource_cache
                        .get_texture_cache_item(&rt_cache_entry.handle);
                    debug_assert_ne!(cache_item.texture_id, TextureSource::Invalid);

                    self.box_shadows
                        .entry(cache_item.texture_id)
                        .or_insert(Vec::new())
                        .push(ClipMaskInstance {
                            clip_data_address: gpu_address,
                            resource_address: gpu_cache.get_address(&cache_item.uv_rect_handle),
                            ..instance
                        });
                }
                ClipItem::Rectangle(_, ClipMode::ClipOut) => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    self.rectangles.push(ClipMaskInstance {
                        clip_data_address: gpu_address,
                        ..instance
                    });
                }
                ClipItem::Rectangle(clip_rect_size, ClipMode::Clip) => {
                    // Rectangles in the same coordinate system are skipped
                    // entirely; others are drawn tiled when possible, or as
                    // a single full-size instance otherwise.
                    if !clip_instance.flags.contains(ClipNodeFlags::SAME_COORD_SYSTEM) {
                        let gpu_address = gpu_cache.get_address(&clip_node.gpu_cache_handle);

                        if !self.add_tiled_clip_mask(
                            actual_rect,
                            clip_rect_size,
                            clip_instance,
                            clip_scroll_tree,
                            world_rect,
                            device_pixel_scale,
                            gpu_address,
                            &instance,
                        ) {
                            self.rectangles.push(ClipMaskInstance {
                                clip_data_address: gpu_address,
                                ..instance
                            });
                        }
                    }
                }
                ClipItem::RoundedRectangle(..) => {
                    let gpu_address =
                        gpu_cache.get_address(&clip_node.gpu_cache_handle);
                    self.rectangles.push(ClipMaskInstance {
                        clip_data_address: gpu_address,
                        ..instance
                    });
                }
            }
        }
    }
}
2831
get_buffer_kind(texture: TextureSource) -> ImageBufferKind2832 fn get_buffer_kind(texture: TextureSource) -> ImageBufferKind {
2833 match texture {
2834 TextureSource::External(ext_image) => {
2835 match ext_image.image_type {
2836 ExternalImageType::TextureHandle(target) => {
2837 target.into()
2838 }
2839 ExternalImageType::Buffer => {
2840 // The ExternalImageType::Buffer should be handled by resource_cache.
2841 // It should go through the non-external case.
2842 panic!("Unexpected non-texture handle type");
2843 }
2844 }
2845 }
2846 _ => ImageBufferKind::Texture2DArray,
2847 }
2848 }
2849
get_shader_opacity(opacity: f32) -> i322850 fn get_shader_opacity(opacity: f32) -> i32 {
2851 (opacity * 65535.0).round() as i32
2852 }
2853
2854 /// Retrieve the GPU task address for a given clip task instance.
2855 /// Returns None if the segment was completely clipped out.
2856 /// Returns Some(OPAQUE_TASK_ADDRESS) if no clip mask is needed.
2857 /// Returns Some(task_address) if there was a valid clip mask.
get_clip_task_address( clip_mask_instances: &[ClipMaskKind], clip_task_index: ClipTaskIndex, offset: i32, render_tasks: &RenderTaskTree, ) -> Option<RenderTaskAddress>2858 fn get_clip_task_address(
2859 clip_mask_instances: &[ClipMaskKind],
2860 clip_task_index: ClipTaskIndex,
2861 offset: i32,
2862 render_tasks: &RenderTaskTree,
2863 ) -> Option<RenderTaskAddress> {
2864 let address = match clip_mask_instances[clip_task_index.0 as usize + offset as usize] {
2865 ClipMaskKind::Mask(task_id) => {
2866 render_tasks.get_task_address(task_id)
2867 }
2868 ClipMaskKind::None => {
2869 OPAQUE_TASK_ADDRESS
2870 }
2871 ClipMaskKind::Clipped => {
2872 return None;
2873 }
2874 };
2875
2876 Some(address)
2877 }
2878