1 /* This Source Code Form is subject to the terms of the Mozilla Public 2 * License, v. 2.0. If a copy of the MPL was not distributed with this 3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 5 use api::{ColorF, BorderStyle, DeviceIntPoint, DeviceIntRect, DeviceIntSize, DevicePixelScale}; 6 use api::{DocumentLayer, FilterOp, ImageFormat, DevicePoint}; 7 use api::{MixBlendMode, PipelineId, DeviceRect, LayoutSize, WorldRect}; 8 use batch::{AlphaBatchBuilder, AlphaBatchContainer, ClipBatcher, resolve_image}; 9 use clip::ClipStore; 10 use clip_scroll_tree::{ClipScrollTree}; 11 use debug_render::DebugItem; 12 use device::{Texture}; 13 #[cfg(feature = "pathfinder")] 14 use euclid::{TypedPoint2D, TypedVector2D}; 15 use frame_builder::FrameGlobalResources; 16 use gpu_cache::{GpuCache}; 17 use gpu_types::{BorderInstance, BlurDirection, BlurInstance, PrimitiveHeaders, ScalingInstance}; 18 use gpu_types::{TransformData, TransformPalette, ZBufferIdGenerator}; 19 use internal_types::{CacheTextureId, FastHashMap, SavedTargetIndex, TextureSource}; 20 #[cfg(feature = "pathfinder")] 21 use pathfinder_partitioner::mesh::Mesh; 22 use picture::{RecordedDirtyRegion, SurfaceInfo}; 23 use prim_store::{PrimitiveStore, DeferredResolve, PrimitiveScratchBuffer}; 24 use profiler::FrameProfileCounters; 25 use render_backend::{DataStores, FrameId}; 26 use render_task::{BlitSource, RenderTaskAddress, RenderTaskId, RenderTaskKind}; 27 use render_task::{BlurTask, ClearMode, GlyphTask, RenderTaskLocation, RenderTaskTree, ScalingTask}; 28 use resource_cache::ResourceCache; 29 use std::{cmp, usize, f32, i32, mem}; 30 use texture_allocator::{ArrayAllocationTracker, FreeRectSlice}; 31 #[cfg(feature = "pathfinder")] 32 use webrender_api::{DevicePixel, FontRenderMode}; 33 34 const STYLE_SOLID: i32 = ((BorderStyle::Solid as i32) << 8) | ((BorderStyle::Solid as i32) << 16); 35 const STYLE_MASK: i32 = 0x00FF_FF00; 36 37 /// According to apitrace, textures larger than 2048 break fast clear 
38 /// optimizations on some intel drivers. We sometimes need to go larger, but 39 /// we try to avoid it. This can go away when proper tiling support lands, 40 /// since we can then split large primitives across multiple textures. 41 const IDEAL_MAX_TEXTURE_DIMENSION: i32 = 2048; 42 /// If we ever need a larger texture than the ideal, we better round it up to a 43 /// reasonable number in order to have a bit of leeway in placing things inside. 44 const TEXTURE_DIMENSION_MASK: i32 = 0xFF; 45 46 /// Identifies a given `RenderTarget` in a `RenderTargetList`. 47 #[derive(Debug, Copy, Clone)] 48 #[cfg_attr(feature = "capture", derive(Serialize))] 49 #[cfg_attr(feature = "replay", derive(Deserialize))] 50 pub struct RenderTargetIndex(pub usize); 51 52 pub struct RenderTargetContext<'a, 'rc> { 53 pub device_pixel_scale: DevicePixelScale, 54 pub prim_store: &'a PrimitiveStore, 55 pub resource_cache: &'rc mut ResourceCache, 56 pub use_dual_source_blending: bool, 57 pub clip_scroll_tree: &'a ClipScrollTree, 58 pub data_stores: &'a DataStores, 59 pub surfaces: &'a [SurfaceInfo], 60 pub scratch: &'a PrimitiveScratchBuffer, 61 pub screen_world_rect: WorldRect, 62 pub globals: &'a FrameGlobalResources, 63 } 64 65 /// Represents a number of rendering operations on a surface. 66 /// 67 /// In graphics parlance, a "render target" usually means "a surface (texture or 68 /// framebuffer) bound to the output of a shader". This trait has a slightly 69 /// different meaning, in that it represents the operations on that surface 70 /// _before_ it's actually bound and rendered. So a `RenderTarget` is built by 71 /// the `RenderBackend` by inserting tasks, and then shipped over to the 72 /// `Renderer` where a device surface is resolved and the tasks are transformed 73 /// into draw commands on that surface. 74 /// 75 /// We express this as a trait to generalize over color and alpha surfaces. 
76 /// a given `RenderTask` will draw to one or the other, depending on its type 77 /// and sometimes on its parameters. See `RenderTask::target_kind`. 78 pub trait RenderTarget { 79 /// Creates a new RenderTarget of the given type. new(screen_size: DeviceIntSize) -> Self80 fn new(screen_size: DeviceIntSize) -> Self; 81 82 /// Optional hook to provide additional processing for the target at the 83 /// end of the build phase. build( &mut self, _ctx: &mut RenderTargetContext, _gpu_cache: &mut GpuCache, _render_tasks: &mut RenderTaskTree, _deferred_resolves: &mut Vec<DeferredResolve>, _prim_headers: &mut PrimitiveHeaders, _transforms: &mut TransformPalette, _z_generator: &mut ZBufferIdGenerator, )84 fn build( 85 &mut self, 86 _ctx: &mut RenderTargetContext, 87 _gpu_cache: &mut GpuCache, 88 _render_tasks: &mut RenderTaskTree, 89 _deferred_resolves: &mut Vec<DeferredResolve>, 90 _prim_headers: &mut PrimitiveHeaders, 91 _transforms: &mut TransformPalette, 92 _z_generator: &mut ZBufferIdGenerator, 93 ) { 94 } 95 96 /// Associates a `RenderTask` with this target. That task must be assigned 97 /// to a region returned by invoking `allocate()` on this target. 98 /// 99 /// TODO(gw): It's a bit odd that we need the deferred resolves and mutable 100 /// GPU cache here. They are typically used by the build step above. They 101 /// are used for the blit jobs to allow resolve_image to be called. It's a 102 /// bit of extra overhead to store the image key here and the resolve them 103 /// in the build step separately. BUT: if/when we add more texture cache 104 /// target jobs, we might want to tidy this up. 
add_task( &mut self, task_id: RenderTaskId, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskTree, clip_store: &ClipStore, transforms: &mut TransformPalette, deferred_resolves: &mut Vec<DeferredResolve>, )105 fn add_task( 106 &mut self, 107 task_id: RenderTaskId, 108 ctx: &RenderTargetContext, 109 gpu_cache: &mut GpuCache, 110 render_tasks: &RenderTaskTree, 111 clip_store: &ClipStore, 112 transforms: &mut TransformPalette, 113 deferred_resolves: &mut Vec<DeferredResolve>, 114 ); 115 needs_depth(&self) -> bool116 fn needs_depth(&self) -> bool; must_be_drawn(&self) -> bool117 fn must_be_drawn(&self) -> bool; 118 used_rect(&self) -> DeviceIntRect119 fn used_rect(&self) -> DeviceIntRect; add_used(&mut self, rect: DeviceIntRect)120 fn add_used(&mut self, rect: DeviceIntRect); 121 } 122 123 /// A tag used to identify the output format of a `RenderTarget`. 124 #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] 125 #[cfg_attr(feature = "capture", derive(Serialize))] 126 #[cfg_attr(feature = "replay", derive(Deserialize))] 127 pub enum RenderTargetKind { 128 Color, // RGBA8 129 Alpha, // R8 130 } 131 132 /// A series of `RenderTarget` instances, serving as the high-level container 133 /// into which `RenderTasks` are assigned. 134 /// 135 /// During the build phase, we iterate over the tasks in each `RenderPass`. For 136 /// each task, we invoke `allocate()` on the `RenderTargetList`, which in turn 137 /// attempts to allocate an output region in the last `RenderTarget` in the 138 /// list. If allocation fails (or if the list is empty), a new `RenderTarget` is 139 /// created and appended to the list. The build phase then assign the task into 140 /// the target associated with the final allocation. 141 /// 142 /// The result is that each `RenderPass` is associated with one or two 143 /// `RenderTargetLists`, depending on whether we have all our tasks have the 144 /// same `RenderTargetKind`. 
The lists are then shipped to the `Renderer`, which 145 /// allocates a device texture array, with one slice per render target in the 146 /// list. 147 /// 148 /// The upshot of this scheme is that it maximizes batching. In a given pass, 149 /// we need to do a separate batch for each individual render target. But with 150 /// the texture array, we can expose the entirety of the previous pass to each 151 /// task in the current pass in a single batch, which generally allows each 152 /// task to be drawn in a single batch regardless of how many results from the 153 /// previous pass it depends on. 154 /// 155 /// Note that in some cases (like drop-shadows), we can depend on the output of 156 /// a pass earlier than the immediately-preceding pass. See `SavedTargetIndex`. 157 #[cfg_attr(feature = "capture", derive(Serialize))] 158 #[cfg_attr(feature = "replay", derive(Deserialize))] 159 pub struct RenderTargetList<T> { 160 screen_size: DeviceIntSize, 161 pub format: ImageFormat, 162 /// The maximum width and height of any single primitive we've encountered 163 /// that will be drawn to a dynamic location. 164 /// 165 /// We initially create our per-slice allocators with a width and height of 166 /// IDEAL_MAX_TEXTURE_DIMENSION. If we encounter a larger primitive, the 167 /// allocation will fail, but we'll bump max_dynamic_size, which will cause the 168 /// allocator for the next slice to be just large enough to accomodate it. 
169 pub max_dynamic_size: DeviceIntSize, 170 pub targets: Vec<T>, 171 pub saved_index: Option<SavedTargetIndex>, 172 pub alloc_tracker: ArrayAllocationTracker, 173 } 174 175 impl<T: RenderTarget> RenderTargetList<T> { new( screen_size: DeviceIntSize, format: ImageFormat, ) -> Self176 fn new( 177 screen_size: DeviceIntSize, 178 format: ImageFormat, 179 ) -> Self { 180 RenderTargetList { 181 screen_size, 182 format, 183 max_dynamic_size: DeviceIntSize::new(0, 0), 184 targets: Vec::new(), 185 saved_index: None, 186 alloc_tracker: ArrayAllocationTracker::new(), 187 } 188 } 189 build( &mut self, ctx: &mut RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &mut RenderTaskTree, deferred_resolves: &mut Vec<DeferredResolve>, saved_index: Option<SavedTargetIndex>, prim_headers: &mut PrimitiveHeaders, transforms: &mut TransformPalette, z_generator: &mut ZBufferIdGenerator, )190 fn build( 191 &mut self, 192 ctx: &mut RenderTargetContext, 193 gpu_cache: &mut GpuCache, 194 render_tasks: &mut RenderTaskTree, 195 deferred_resolves: &mut Vec<DeferredResolve>, 196 saved_index: Option<SavedTargetIndex>, 197 prim_headers: &mut PrimitiveHeaders, 198 transforms: &mut TransformPalette, 199 z_generator: &mut ZBufferIdGenerator, 200 ) { 201 debug_assert_eq!(None, self.saved_index); 202 self.saved_index = saved_index; 203 204 for target in &mut self.targets { 205 target.build( 206 ctx, 207 gpu_cache, 208 render_tasks, 209 deferred_resolves, 210 prim_headers, 211 transforms, 212 z_generator, 213 ); 214 } 215 } 216 allocate( &mut self, alloc_size: DeviceIntSize, ) -> (RenderTargetIndex, DeviceIntPoint)217 fn allocate( 218 &mut self, 219 alloc_size: DeviceIntSize, 220 ) -> (RenderTargetIndex, DeviceIntPoint) { 221 let (free_rect_slice, origin) = match self.alloc_tracker.allocate(&alloc_size) { 222 Some(allocation) => allocation, 223 None => { 224 // Have the allocator restrict slice sizes to our max ideal 225 // dimensions, unless we've already gone bigger on a previous 226 // slice. 
227 let rounded_dimensions = DeviceIntSize::new( 228 (self.max_dynamic_size.width + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK, 229 (self.max_dynamic_size.height + TEXTURE_DIMENSION_MASK) & !TEXTURE_DIMENSION_MASK, 230 ); 231 let allocator_dimensions = DeviceIntSize::new( 232 cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, rounded_dimensions.width), 233 cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, rounded_dimensions.height), 234 ); 235 236 assert!(alloc_size.width <= allocator_dimensions.width && 237 alloc_size.height <= allocator_dimensions.height); 238 let slice = FreeRectSlice(self.targets.len() as u32); 239 self.targets.push(T::new(self.screen_size)); 240 241 self.alloc_tracker.extend( 242 slice, 243 allocator_dimensions, 244 alloc_size, 245 ); 246 247 (slice, DeviceIntPoint::zero()) 248 } 249 }; 250 251 if alloc_size.is_empty_or_negative() && self.targets.is_empty() { 252 // push an unused target here, only if we don't have any 253 self.targets.push(T::new(self.screen_size)); 254 } 255 256 self.targets[free_rect_slice.0 as usize] 257 .add_used(DeviceIntRect::new(origin, alloc_size)); 258 259 (RenderTargetIndex(free_rect_slice.0 as usize), origin) 260 } 261 needs_depth(&self) -> bool262 pub fn needs_depth(&self) -> bool { 263 self.targets.iter().any(|target| target.needs_depth()) 264 } 265 must_be_drawn(&self) -> bool266 pub fn must_be_drawn(&self) -> bool { 267 self.targets.iter().any(|target| target.must_be_drawn()) 268 } 269 check_ready(&self, t: &Texture)270 pub fn check_ready(&self, t: &Texture) { 271 let dimensions = t.get_dimensions(); 272 assert!(dimensions.width >= self.max_dynamic_size.width); 273 assert!(dimensions.height >= self.max_dynamic_size.height); 274 assert_eq!(t.get_format(), self.format); 275 assert_eq!(t.get_layer_count() as usize, self.targets.len()); 276 assert!(t.supports_depth() >= self.needs_depth()); 277 } 278 } 279 280 /// Frame output information for a given pipeline ID. 
281 /// Storing the task ID allows the renderer to find 282 /// the target rect within the render target that this 283 /// pipeline exists at. 284 #[cfg_attr(feature = "capture", derive(Serialize))] 285 #[cfg_attr(feature = "replay", derive(Deserialize))] 286 pub struct FrameOutput { 287 pub task_id: RenderTaskId, 288 pub pipeline_id: PipelineId, 289 } 290 291 // Defines where the source data for a blit job can be found. 292 #[cfg_attr(feature = "capture", derive(Serialize))] 293 #[cfg_attr(feature = "replay", derive(Deserialize))] 294 pub enum BlitJobSource { 295 Texture(TextureSource, i32, DeviceIntRect), 296 RenderTask(RenderTaskId), 297 } 298 299 // Information required to do a blit from a source to a target. 300 #[cfg_attr(feature = "capture", derive(Serialize))] 301 #[cfg_attr(feature = "replay", derive(Deserialize))] 302 pub struct BlitJob { 303 pub source: BlitJobSource, 304 pub target_rect: DeviceIntRect, 305 } 306 307 #[cfg_attr(feature = "capture", derive(Serialize))] 308 #[cfg_attr(feature = "replay", derive(Deserialize))] 309 pub struct LineDecorationJob { 310 pub task_rect: DeviceRect, 311 pub local_size: LayoutSize, 312 pub wavy_line_thickness: f32, 313 pub style: i32, 314 pub orientation: i32, 315 } 316 317 #[cfg(feature = "pathfinder")] 318 #[cfg_attr(feature = "capture", derive(Serialize))] 319 #[cfg_attr(feature = "replay", derive(Deserialize))] 320 pub struct GlyphJob { 321 pub mesh: Mesh, 322 pub target_rect: DeviceIntRect, 323 pub origin: DeviceIntPoint, 324 pub subpixel_offset: TypedPoint2D<f32, DevicePixel>, 325 pub render_mode: FontRenderMode, 326 pub embolden_amount: TypedVector2D<f32, DevicePixel>, 327 } 328 329 #[cfg(not(feature = "pathfinder"))] 330 #[cfg_attr(feature = "capture", derive(Serialize))] 331 #[cfg_attr(feature = "replay", derive(Deserialize))] 332 pub struct GlyphJob; 333 334 /// Contains the work (in the form of instance arrays) needed to fill a color 335 /// color output surface (RGBA8). 
336 /// 337 /// See `RenderTarget`. 338 #[cfg_attr(feature = "capture", derive(Serialize))] 339 #[cfg_attr(feature = "replay", derive(Deserialize))] 340 pub struct ColorRenderTarget { 341 pub alpha_batch_containers: Vec<AlphaBatchContainer>, 342 // List of blur operations to apply for this render target. 343 pub vertical_blurs: Vec<BlurInstance>, 344 pub horizontal_blurs: Vec<BlurInstance>, 345 pub readbacks: Vec<DeviceIntRect>, 346 pub scalings: Vec<ScalingInstance>, 347 pub blits: Vec<BlitJob>, 348 // List of frame buffer outputs for this render target. 349 pub outputs: Vec<FrameOutput>, 350 alpha_tasks: Vec<RenderTaskId>, 351 screen_size: DeviceIntSize, 352 // Track the used rect of the render target, so that 353 // we can set a scissor rect and only clear to the 354 // used portion of the target as an optimization. 355 pub used_rect: DeviceIntRect, 356 } 357 358 impl RenderTarget for ColorRenderTarget { new(screen_size: DeviceIntSize) -> Self359 fn new(screen_size: DeviceIntSize) -> Self { 360 ColorRenderTarget { 361 alpha_batch_containers: Vec::new(), 362 vertical_blurs: Vec::new(), 363 horizontal_blurs: Vec::new(), 364 readbacks: Vec::new(), 365 scalings: Vec::new(), 366 blits: Vec::new(), 367 outputs: Vec::new(), 368 alpha_tasks: Vec::new(), 369 screen_size, 370 used_rect: DeviceIntRect::zero(), 371 } 372 } 373 build( &mut self, ctx: &mut RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &mut RenderTaskTree, deferred_resolves: &mut Vec<DeferredResolve>, prim_headers: &mut PrimitiveHeaders, transforms: &mut TransformPalette, z_generator: &mut ZBufferIdGenerator, )374 fn build( 375 &mut self, 376 ctx: &mut RenderTargetContext, 377 gpu_cache: &mut GpuCache, 378 render_tasks: &mut RenderTaskTree, 379 deferred_resolves: &mut Vec<DeferredResolve>, 380 prim_headers: &mut PrimitiveHeaders, 381 transforms: &mut TransformPalette, 382 z_generator: &mut ZBufferIdGenerator, 383 ) { 384 let mut merged_batches = AlphaBatchContainer::new(None, Vec::new()); 385 
386 for task_id in &self.alpha_tasks { 387 let task = &render_tasks[*task_id]; 388 389 match task.clear_mode { 390 ClearMode::One | 391 ClearMode::Zero => { 392 panic!("bug: invalid clear mode for color task"); 393 } 394 ClearMode::Transparent => {} 395 } 396 397 match task.kind { 398 RenderTaskKind::Picture(ref pic_task) => { 399 let pic = &ctx.prim_store.pictures[pic_task.pic_index.0]; 400 401 let (target_rect, _) = task.get_target_rect(); 402 403 let scisor_rect = if pic_task.can_merge { 404 None 405 } else { 406 Some(target_rect) 407 }; 408 409 let mut batch_builder = AlphaBatchBuilder::new( 410 self.screen_size, 411 scisor_rect, 412 ); 413 414 batch_builder.add_pic_to_batch( 415 pic, 416 *task_id, 417 ctx, 418 gpu_cache, 419 render_tasks, 420 deferred_resolves, 421 prim_headers, 422 transforms, 423 pic_task.root_spatial_node_index, 424 z_generator, 425 ); 426 427 batch_builder.build( 428 &mut self.alpha_batch_containers, 429 &mut merged_batches, 430 ); 431 } 432 _ => { 433 unreachable!(); 434 } 435 } 436 } 437 438 if !merged_batches.is_empty() { 439 self.alpha_batch_containers.push(merged_batches); 440 } 441 } 442 add_task( &mut self, task_id: RenderTaskId, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskTree, _: &ClipStore, _: &mut TransformPalette, deferred_resolves: &mut Vec<DeferredResolve>, )443 fn add_task( 444 &mut self, 445 task_id: RenderTaskId, 446 ctx: &RenderTargetContext, 447 gpu_cache: &mut GpuCache, 448 render_tasks: &RenderTaskTree, 449 _: &ClipStore, 450 _: &mut TransformPalette, 451 deferred_resolves: &mut Vec<DeferredResolve>, 452 ) { 453 let task = &render_tasks[task_id]; 454 455 match task.kind { 456 RenderTaskKind::VerticalBlur(ref info) => { 457 info.add_instances( 458 &mut self.vertical_blurs, 459 BlurDirection::Vertical, 460 render_tasks.get_task_address(task_id), 461 render_tasks.get_task_address(task.children[0]), 462 ); 463 } 464 RenderTaskKind::HorizontalBlur(ref info) => { 465 info.add_instances( 466 
&mut self.horizontal_blurs, 467 BlurDirection::Horizontal, 468 render_tasks.get_task_address(task_id), 469 render_tasks.get_task_address(task.children[0]), 470 ); 471 } 472 RenderTaskKind::Picture(ref task_info) => { 473 let pic = &ctx.prim_store.pictures[task_info.pic_index.0]; 474 self.alpha_tasks.push(task_id); 475 476 // If this pipeline is registered as a frame output 477 // store the information necessary to do the copy. 478 if let Some(pipeline_id) = pic.frame_output_pipeline_id { 479 self.outputs.push(FrameOutput { 480 pipeline_id, 481 task_id, 482 }); 483 } 484 } 485 RenderTaskKind::ClipRegion(..) | 486 RenderTaskKind::Border(..) | 487 RenderTaskKind::CacheMask(..) | 488 RenderTaskKind::LineDecoration(..) => { 489 panic!("Should not be added to color target!"); 490 } 491 RenderTaskKind::Glyph(..) => { 492 // FIXME(pcwalton): Support color glyphs. 493 panic!("Glyphs should not be added to color target!"); 494 } 495 RenderTaskKind::Readback(device_rect) => { 496 self.readbacks.push(device_rect); 497 } 498 RenderTaskKind::Scaling(..) => { 499 self.scalings.push(ScalingInstance { 500 task_address: render_tasks.get_task_address(task_id), 501 src_task_address: render_tasks.get_task_address(task.children[0]), 502 }); 503 } 504 RenderTaskKind::Blit(ref task_info) => { 505 match task_info.source { 506 BlitSource::Image { key } => { 507 // Get the cache item for the source texture. 508 let cache_item = resolve_image( 509 key.request, 510 ctx.resource_cache, 511 gpu_cache, 512 deferred_resolves, 513 ); 514 515 // Work out a source rect to copy from the texture, depending on whether 516 // a sub-rect is present or not. 
517 let source_rect = key.texel_rect.map_or(cache_item.uv_rect.to_i32(), |sub_rect| { 518 DeviceIntRect::new( 519 DeviceIntPoint::new( 520 cache_item.uv_rect.origin.x as i32 + sub_rect.origin.x, 521 cache_item.uv_rect.origin.y as i32 + sub_rect.origin.y, 522 ), 523 sub_rect.size, 524 ) 525 }); 526 527 // Store the blit job for the renderer to execute, including 528 // the allocated destination rect within this target. 529 let (target_rect, _) = task.get_target_rect(); 530 self.blits.push(BlitJob { 531 source: BlitJobSource::Texture( 532 cache_item.texture_id, 533 cache_item.texture_layer, 534 source_rect, 535 ), 536 target_rect: target_rect.inner_rect(task_info.padding) 537 }); 538 } 539 BlitSource::RenderTask { .. } => { 540 panic!("BUG: render task blit jobs to render tasks not supported"); 541 } 542 } 543 } 544 } 545 } 546 must_be_drawn(&self) -> bool547 fn must_be_drawn(&self) -> bool { 548 self.alpha_batch_containers.iter().any(|ab| { 549 !ab.tile_blits.is_empty() 550 }) 551 } 552 needs_depth(&self) -> bool553 fn needs_depth(&self) -> bool { 554 self.alpha_batch_containers.iter().any(|ab| { 555 !ab.opaque_batches.is_empty() 556 }) 557 } 558 used_rect(&self) -> DeviceIntRect559 fn used_rect(&self) -> DeviceIntRect { 560 self.used_rect 561 } 562 add_used(&mut self, rect: DeviceIntRect)563 fn add_used(&mut self, rect: DeviceIntRect) { 564 self.used_rect = self.used_rect.union(&rect); 565 } 566 } 567 568 /// Contains the work (in the form of instance arrays) needed to fill an alpha 569 /// output surface (R8). 570 /// 571 /// See `RenderTarget`. 572 #[cfg_attr(feature = "capture", derive(Serialize))] 573 #[cfg_attr(feature = "replay", derive(Deserialize))] 574 pub struct AlphaRenderTarget { 575 pub clip_batcher: ClipBatcher, 576 // List of blur operations to apply for this render target. 
577 pub vertical_blurs: Vec<BlurInstance>, 578 pub horizontal_blurs: Vec<BlurInstance>, 579 pub scalings: Vec<ScalingInstance>, 580 pub zero_clears: Vec<RenderTaskId>, 581 // Track the used rect of the render target, so that 582 // we can set a scissor rect and only clear to the 583 // used portion of the target as an optimization. 584 pub used_rect: DeviceIntRect, 585 } 586 587 impl RenderTarget for AlphaRenderTarget { new(_screen_size: DeviceIntSize) -> Self588 fn new(_screen_size: DeviceIntSize) -> Self { 589 AlphaRenderTarget { 590 clip_batcher: ClipBatcher::new(), 591 vertical_blurs: Vec::new(), 592 horizontal_blurs: Vec::new(), 593 scalings: Vec::new(), 594 zero_clears: Vec::new(), 595 used_rect: DeviceIntRect::zero(), 596 } 597 } 598 add_task( &mut self, task_id: RenderTaskId, ctx: &RenderTargetContext, gpu_cache: &mut GpuCache, render_tasks: &RenderTaskTree, clip_store: &ClipStore, transforms: &mut TransformPalette, _: &mut Vec<DeferredResolve>, )599 fn add_task( 600 &mut self, 601 task_id: RenderTaskId, 602 ctx: &RenderTargetContext, 603 gpu_cache: &mut GpuCache, 604 render_tasks: &RenderTaskTree, 605 clip_store: &ClipStore, 606 transforms: &mut TransformPalette, 607 _: &mut Vec<DeferredResolve>, 608 ) { 609 let task = &render_tasks[task_id]; 610 611 match task.clear_mode { 612 ClearMode::Zero => { 613 self.zero_clears.push(task_id); 614 } 615 ClearMode::One => {} 616 ClearMode::Transparent => { 617 panic!("bug: invalid clear mode for alpha task"); 618 } 619 } 620 621 match task.kind { 622 RenderTaskKind::Readback(..) | 623 RenderTaskKind::Picture(..) | 624 RenderTaskKind::Blit(..) | 625 RenderTaskKind::Border(..) | 626 RenderTaskKind::LineDecoration(..) | 627 RenderTaskKind::Glyph(..) 
=> { 628 panic!("BUG: should not be added to alpha target!"); 629 } 630 RenderTaskKind::VerticalBlur(ref info) => { 631 info.add_instances( 632 &mut self.vertical_blurs, 633 BlurDirection::Vertical, 634 render_tasks.get_task_address(task_id), 635 render_tasks.get_task_address(task.children[0]), 636 ); 637 } 638 RenderTaskKind::HorizontalBlur(ref info) => { 639 info.add_instances( 640 &mut self.horizontal_blurs, 641 BlurDirection::Horizontal, 642 render_tasks.get_task_address(task_id), 643 render_tasks.get_task_address(task.children[0]), 644 ); 645 } 646 RenderTaskKind::CacheMask(ref task_info) => { 647 let task_address = render_tasks.get_task_address(task_id); 648 self.clip_batcher.add( 649 task_address, 650 task_info.clip_node_range, 651 task_info.root_spatial_node_index, 652 ctx.resource_cache, 653 gpu_cache, 654 clip_store, 655 ctx.clip_scroll_tree, 656 transforms, 657 &ctx.data_stores.clip, 658 task_info.actual_rect, 659 &ctx.screen_world_rect, 660 ctx.device_pixel_scale, 661 task_info.snap_offsets, 662 ); 663 } 664 RenderTaskKind::ClipRegion(ref region_task) => { 665 let task_address = render_tasks.get_task_address(task_id); 666 let device_rect = DeviceRect::new( 667 DevicePoint::zero(), 668 task.get_dynamic_size().to_f32(), 669 ); 670 self.clip_batcher.add_clip_region( 671 task_address, 672 region_task.clip_data_address, 673 region_task.local_pos, 674 device_rect, 675 ); 676 } 677 RenderTaskKind::Scaling(ref info) => { 678 info.add_instances( 679 &mut self.scalings, 680 render_tasks.get_task_address(task_id), 681 render_tasks.get_task_address(task.children[0]), 682 ); 683 } 684 } 685 } 686 needs_depth(&self) -> bool687 fn needs_depth(&self) -> bool { 688 false 689 } 690 must_be_drawn(&self) -> bool691 fn must_be_drawn(&self) -> bool { 692 false 693 } 694 used_rect(&self) -> DeviceIntRect695 fn used_rect(&self) -> DeviceIntRect { 696 self.used_rect 697 } 698 add_used(&mut self, rect: DeviceIntRect)699 fn add_used(&mut self, rect: DeviceIntRect) { 700 
self.used_rect = self.used_rect.union(&rect); 701 } 702 } 703 704 #[cfg_attr(feature = "capture", derive(Serialize))] 705 #[cfg_attr(feature = "replay", derive(Deserialize))] 706 pub struct TextureCacheRenderTarget { 707 pub target_kind: RenderTargetKind, 708 pub horizontal_blurs: Vec<BlurInstance>, 709 pub blits: Vec<BlitJob>, 710 pub glyphs: Vec<GlyphJob>, 711 pub border_segments_complex: Vec<BorderInstance>, 712 pub border_segments_solid: Vec<BorderInstance>, 713 pub clears: Vec<DeviceIntRect>, 714 pub line_decorations: Vec<LineDecorationJob>, 715 } 716 717 impl TextureCacheRenderTarget { new(target_kind: RenderTargetKind) -> Self718 fn new(target_kind: RenderTargetKind) -> Self { 719 TextureCacheRenderTarget { 720 target_kind, 721 horizontal_blurs: vec![], 722 blits: vec![], 723 glyphs: vec![], 724 border_segments_complex: vec![], 725 border_segments_solid: vec![], 726 clears: vec![], 727 line_decorations: vec![], 728 } 729 } 730 add_task( &mut self, task_id: RenderTaskId, render_tasks: &mut RenderTaskTree, )731 fn add_task( 732 &mut self, 733 task_id: RenderTaskId, 734 render_tasks: &mut RenderTaskTree, 735 ) { 736 let task_address = render_tasks.get_task_address(task_id); 737 let src_task_address = render_tasks[task_id].children.get(0).map(|src_task_id| { 738 render_tasks.get_task_address(*src_task_id) 739 }); 740 741 let task = &mut render_tasks[task_id]; 742 let target_rect = task.get_target_rect(); 743 744 match task.kind { 745 RenderTaskKind::LineDecoration(ref info) => { 746 self.clears.push(target_rect.0); 747 748 self.line_decorations.push(LineDecorationJob { 749 task_rect: target_rect.0.to_f32(), 750 local_size: info.local_size, 751 style: info.style as i32, 752 orientation: info.orientation as i32, 753 wavy_line_thickness: info.wavy_line_thickness, 754 }); 755 } 756 RenderTaskKind::HorizontalBlur(ref info) => { 757 info.add_instances( 758 &mut self.horizontal_blurs, 759 BlurDirection::Horizontal, 760 task_address, 761 src_task_address.unwrap(), 762 
); 763 } 764 RenderTaskKind::Blit(ref task_info) => { 765 match task_info.source { 766 BlitSource::Image { .. } => { 767 // reading/writing from the texture cache at the same time 768 // is undefined behavior. 769 panic!("bug: a single blit cannot be to/from texture cache"); 770 } 771 BlitSource::RenderTask { task_id } => { 772 // Add a blit job to copy from an existing render 773 // task to this target. 774 self.blits.push(BlitJob { 775 source: BlitJobSource::RenderTask(task_id), 776 target_rect: target_rect.0.inner_rect(task_info.padding), 777 }); 778 } 779 } 780 } 781 RenderTaskKind::Border(ref mut task_info) => { 782 self.clears.push(target_rect.0); 783 784 let task_origin = target_rect.0.origin.to_f32(); 785 let instances = mem::replace(&mut task_info.instances, Vec::new()); 786 for mut instance in instances { 787 // TODO(gw): It may be better to store the task origin in 788 // the render task data instead of per instance. 789 instance.task_origin = task_origin; 790 if instance.flags & STYLE_MASK == STYLE_SOLID { 791 self.border_segments_solid.push(instance); 792 } else { 793 self.border_segments_complex.push(instance); 794 } 795 } 796 } 797 RenderTaskKind::Glyph(ref mut task_info) => { 798 self.add_glyph_task(task_info, target_rect.0) 799 } 800 RenderTaskKind::VerticalBlur(..) | 801 RenderTaskKind::Picture(..) | 802 RenderTaskKind::ClipRegion(..) | 803 RenderTaskKind::CacheMask(..) | 804 RenderTaskKind::Readback(..) | 805 RenderTaskKind::Scaling(..) 
            => {
                panic!("BUG: unexpected task kind for texture cache target");
            }
        }
    }

    /// Queues a pathfinder glyph rasterization job for this target.
    ///
    /// Takes ownership of the task's mesh via `take()`, so a given task may
    /// only be added once.
    #[cfg(feature = "pathfinder")]
    fn add_glyph_task(&mut self, task_info: &mut GlyphTask, target_rect: DeviceIntRect) {
        self.glyphs.push(GlyphJob {
            // NOTE(review): `take().unwrap()` assumes the mesh has not already
            // been consumed (i.e. this task was not added before) — confirm.
            mesh: task_info.mesh.take().unwrap(),
            target_rect: target_rect,
            origin: task_info.origin,
            subpixel_offset: task_info.subpixel_offset,
            render_mode: task_info.render_mode,
            embolden_amount: task_info.embolden_amount,
        })
    }

    /// No-op stub used when pathfinder glyph rasterization is compiled out.
    #[cfg(not(feature = "pathfinder"))]
    fn add_glyph_task(&mut self, _: &mut GlyphTask, _: DeviceIntRect) {}
}

/// Contains the set of `RenderTarget`s specific to the kind of pass.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum RenderPassKind {
    /// The final pass to the main frame buffer, where we have a single color
    /// target for display to the user.
    MainFramebuffer(ColorRenderTarget),
    /// An intermediate pass, where we may have multiple targets.
    OffScreen {
        /// Off-screen alpha (single-channel) targets.
        alpha: RenderTargetList<AlphaRenderTarget>,
        /// Off-screen color targets.
        color: RenderTargetList<ColorRenderTarget>,
        /// Targets that render directly into texture-cache textures, keyed by
        /// destination texture id and array layer.
        texture_cache: FastHashMap<(CacheTextureId, usize), TextureCacheRenderTarget>,
    },
}

/// A render pass represents a set of rendering operations that don't depend on one
/// another.
///
/// A render pass can have several render targets if there wasn't enough space in one
/// target to do all of the rendering for that pass. See `RenderTargetList`.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderPass {
    /// The kind of pass, as well as the set of targets associated with that
    /// kind of pass.
    pub kind: RenderPassKind,
    /// The set of tasks to be performed in this pass, as indices into the
    /// `RenderTaskTree`.
    tasks: Vec<RenderTaskId>,
}

impl RenderPass {
    /// Creates a pass for the main framebuffer. There is only one of these, and
    /// it is always the last pass.
    pub fn new_main_framebuffer(screen_size: DeviceIntSize) -> Self {
        let target = ColorRenderTarget::new(screen_size);
        RenderPass {
            kind: RenderPassKind::MainFramebuffer(target),
            tasks: vec![],
        }
    }

    /// Creates an intermediate off-screen pass.
    ///
    /// Color targets use BGRA8 and alpha targets use R8 (single channel).
    pub fn new_off_screen(screen_size: DeviceIntSize) -> Self {
        RenderPass {
            kind: RenderPassKind::OffScreen {
                color: RenderTargetList::new(screen_size, ImageFormat::BGRA8),
                alpha: RenderTargetList::new(screen_size, ImageFormat::R8),
                texture_cache: FastHashMap::default(),
            },
            tasks: vec![],
        }
    }

    /// Adds a task to this pass.
    ///
    /// `size` and `target_kind` are used only to grow the max-encountered
    /// dynamic allocation size for off-screen targets; the task itself is
    /// always recorded in `self.tasks`.
    pub fn add_render_task(
        &mut self,
        task_id: RenderTaskId,
        size: DeviceIntSize,
        target_kind: RenderTargetKind,
        location: &RenderTaskLocation,
    ) {
        if let RenderPassKind::OffScreen { ref mut color, ref mut alpha, .. } = self.kind {
            // If this will be rendered to a dynamically-allocated region on an
            // off-screen render target, update the max-encountered size. We don't
            // need to do this for things drawn to the texture cache, since those
            // don't affect our render target allocation.
            if location.is_dynamic() {
                let max_size = match target_kind {
                    RenderTargetKind::Color => &mut color.max_dynamic_size,
                    RenderTargetKind::Alpha => &mut alpha.max_dynamic_size,
                };
                max_size.width = cmp::max(max_size.width, size.width);
                max_size.height = cmp::max(max_size.height, size.height);
            }
        }

        self.tasks.push(task_id);
    }

    /// Processes this pass to prepare it for rendering.
    ///
    /// Among other things, this allocates output regions for each of our tasks
    /// (added via `add_render_task`) in a RenderTarget and assigns it into that
    /// target.
    pub fn build(
        &mut self,
        ctx: &mut RenderTargetContext,
        gpu_cache: &mut GpuCache,
        render_tasks: &mut RenderTaskTree,
        deferred_resolves: &mut Vec<DeferredResolve>,
        clip_store: &ClipStore,
        transforms: &mut TransformPalette,
        prim_headers: &mut PrimitiveHeaders,
        z_generator: &mut ZBufferIdGenerator,
    ) {
        profile_scope!("RenderPass::build");

        match self.kind {
            RenderPassKind::MainFramebuffer(ref mut target) => {
                // The main framebuffer only has a single color target, and
                // every task in the pass must be a color task.
                for &task_id in &self.tasks {
                    assert_eq!(render_tasks[task_id].target_kind(), RenderTargetKind::Color);
                    target.add_task(
                        task_id,
                        ctx,
                        gpu_cache,
                        render_tasks,
                        clip_store,
                        transforms,
                        deferred_resolves,
                    );
                }
                target.build(
                    ctx,
                    gpu_cache,
                    render_tasks,
                    deferred_resolves,
                    prim_headers,
                    transforms,
                    z_generator,
                );
            }
            RenderPassKind::OffScreen { ref mut color, ref mut alpha, ref mut texture_cache } => {
                // Reserve a saved-target slot up front if any color task in
                // this pass wants its output preserved after the pass runs.
                let saved_color = if self.tasks.iter().any(|&task_id| {
                    let t = &render_tasks[task_id];
                    t.target_kind() == RenderTargetKind::Color && t.saved_index.is_some()
                }) {
                    Some(render_tasks.save_target())
                } else {
                    None
                };
                // Likewise for alpha tasks.
                let saved_alpha = if self.tasks.iter().any(|&task_id| {
                    let t = &render_tasks[task_id];
                    t.target_kind() == RenderTargetKind::Alpha && t.saved_index.is_some()
                }) {
                    Some(render_tasks.save_target())
                } else {
                    None
                };

                // Step through each task, adding to batches as appropriate.
                for &task_id in &self.tasks {
                    // Scope the mutable borrow of the task so that
                    // `render_tasks` can be re-borrowed for add_task below.
                    let (target_kind, texture_target, layer) = {
                        let task = &mut render_tasks[task_id];
                        let target_kind = task.target_kind();

                        // Find a target to assign this task to, or create a new
                        // one if required.
                        let (texture_target, layer) = match task.location {
                            RenderTaskLocation::TextureCache { texture, layer, .. } => {
                                (Some(texture), layer)
                            }
                            RenderTaskLocation::Fixed(..) => {
                                (None, 0)
                            }
                            RenderTaskLocation::Dynamic(ref mut origin, size) => {
                                // Allocate a region in one of the off-screen
                                // targets, and record where it ended up back
                                // into the task's location.
                                let (target_index, alloc_origin) = match target_kind {
                                    RenderTargetKind::Color => color.allocate(size),
                                    RenderTargetKind::Alpha => alpha.allocate(size),
                                };
                                *origin = Some((alloc_origin, target_index));
                                (None, target_index.0)
                            }
                        };

                        // Replace the pending saved index with a real one
                        if let Some(index) = task.saved_index {
                            assert_eq!(index, SavedTargetIndex::PENDING);
                            task.saved_index = match target_kind {
                                RenderTargetKind::Color => saved_color,
                                RenderTargetKind::Alpha => saved_alpha,
                            };
                        }

                        // Give the render task an opportunity to add any
                        // information to the GPU cache, if appropriate.
                        task.write_gpu_blocks(gpu_cache);

                        (target_kind, texture_target, layer)
                    };

                    match texture_target {
                        Some(texture_target) => {
                            // Tasks that render directly into the texture cache
                            // get their own per-(texture, layer) target.
                            let texture = texture_cache
                                .entry((texture_target, layer))
                                .or_insert(
                                    TextureCacheRenderTarget::new(target_kind)
                                );
                            texture.add_task(task_id, render_tasks);
                        }
                        None => {
                            match target_kind {
                                RenderTargetKind::Color => color.targets[layer].add_task(
                                    task_id,
                                    ctx,
                                    gpu_cache,
                                    render_tasks,
                                    clip_store,
                                    transforms,
                                    deferred_resolves,
                                ),
                                RenderTargetKind::Alpha => alpha.targets[layer].add_task(
                                    task_id,
                                    ctx,
                                    gpu_cache,
                                    render_tasks,
                                    clip_store,
                                    transforms,
                                    deferred_resolves,
                                ),
                            }
                        }
                    }
                }

                color.build(
                    ctx,
                    gpu_cache,
                    render_tasks,
                    deferred_resolves,
                    saved_color,
                    prim_headers,
                    transforms,
                    z_generator,
                );
                alpha.build(
                    ctx,
                    gpu_cache,
                    render_tasks,
                    deferred_resolves,
                    saved_alpha,
                    prim_headers,
                    transforms,
                    z_generator,
                );
            }
        }
    }
}

/// The set of compositing operations to apply to a stacking context surface.
#[derive(Debug, Clone, Default)]
pub struct CompositeOps {
    /// Requires only a single texture as input (e.g. most filters)
    pub filters: Vec<FilterOp>,

    /// Requires two source textures (e.g. mix-blend-mode)
    pub mix_blend_mode: Option<MixBlendMode>,
}

impl CompositeOps {
    /// Bundles the given filters and optional mix-blend-mode together.
    pub fn new(filters: Vec<FilterOp>, mix_blend_mode: Option<MixBlendMode>) -> Self {
        CompositeOps {
            filters,
            mix_blend_mode,
        }
    }

    /// True if no compositing work is required at all.
    pub fn is_empty(&self) -> bool {
        self.filters.is_empty() && self.mix_blend_mode.is_none()
    }
}

/// A rendering-oriented representation of the frame built by the render backend
/// and presented to the renderer.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct Frame {
    //TODO: share the fields with DocumentView struct
    /// Size of the window this frame was built for, in device pixels.
    pub window_size: DeviceIntSize,
    /// Visible area within the window, in device pixels.
    pub inner_rect: DeviceIntRect,
    /// Clear color for the frame, if any.
    pub background_color: Option<ColorF>,
    /// The document layer this frame belongs to.
    pub layer: DocumentLayer,
    /// Device pixels per logical pixel at the time the frame was built.
    pub device_pixel_ratio: f32,
    /// Ordered render passes; the last pass targets the main framebuffer.
    pub passes: Vec<RenderPass>,
    // Profile counters are runtime-only state; they are re-created (not
    // serialized) on capture/replay.
    #[cfg_attr(any(feature = "capture", feature = "replay"), serde(default = "FrameProfileCounters::new", skip))]
    pub profile_counters: FrameProfileCounters,

    /// Flat transform data uploaded to the GPU for this frame.
    pub transform_palette: Vec<TransformData>,
    /// The tree of render tasks referenced by the passes above.
    pub render_tasks: RenderTaskTree,
    /// Per-primitive header data for this frame.
    pub prim_headers: PrimitiveHeaders,

    /// The GPU cache frame that the contents of Self depend on
    pub gpu_cache_frame_id: FrameId,

    /// List of textures that we don't know about yet
    /// from the backend thread. The render thread
    /// will use a callback to resolve these and
    /// patch the data structures.
    pub deferred_resolves: Vec<DeferredResolve>,

    /// True if this frame contains any render tasks
    /// that write to the texture cache.
    pub has_texture_cache_tasks: bool,

    /// True if this frame has been drawn by the
    /// renderer.
    pub has_been_rendered: bool,

    /// Dirty regions recorded when generating this frame. Empty when not in
    /// testing.
    // NOTE(review): the rest of this struct gates serde behavior on the
    // "capture"/"replay" features, but this skip is gated on a "serde"
    // feature — confirm this gate is intentional and actually enabled.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub recorded_dirty_regions: Vec<RecordedDirtyRegion>,

    /// Debugging information to overlay for this frame.
    pub debug_items: Vec<DebugItem>,
}

impl Frame {
    /// This frame must be flushed if it writes to the
    /// texture cache, and hasn't been drawn yet.
    pub fn must_be_drawn(&self) -> bool {
        self.has_texture_cache_tasks && !self.has_been_rendered
    }
}

impl BlurTask {
    /// Appends one blur instance pairing this blur task's target with its
    /// source task, in the given direction (horizontal or vertical).
    fn add_instances(
        &self,
        instances: &mut Vec<BlurInstance>,
        blur_direction: BlurDirection,
        task_address: RenderTaskAddress,
        src_task_address: RenderTaskAddress,
    ) {
        let instance = BlurInstance {
            task_address,
            src_task_address,
            blur_direction,
        };

        instances.push(instance);
    }
}

impl ScalingTask {
    /// Appends one scaling instance pairing this task's target with its
    /// source task.
    fn add_instances(
        &self,
        instances: &mut Vec<ScalingInstance>,
        task_address: RenderTaskAddress,
        src_task_address: RenderTaskAddress,
    ) {
        let instance = ScalingInstance {
            task_address,
            src_task_address,
        };

        instances.push(instance);
    }
}