1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 //! The webrender API.
6 //!
7 //! The `webrender::renderer` module provides the interface to webrender, which
8 //! is accessible through [`Renderer`][renderer]
9 //!
10 //! [renderer]: struct.Renderer.html
11
12 use api::{BlobImageRenderer, ColorF, ColorU, DeviceIntPoint, DeviceIntRect, DeviceIntSize};
13 use api::{DeviceUintPoint, DeviceUintRect, DeviceUintSize, DocumentId, Epoch, ExternalImageId};
14 use api::{ExternalImageType, FontRenderMode, ImageFormat, PipelineId};
15 use api::{RenderApiSender, RenderNotifier, TexelRect, TextureTarget, YuvColorSpace, YuvFormat};
16 use api::{YUV_COLOR_SPACES, YUV_FORMATS, channel};
17 #[cfg(not(feature = "debugger"))]
18 use api::ApiMsg;
19 use api::DebugCommand;
20 #[cfg(not(feature = "debugger"))]
21 use api::channel::MsgSender;
22 use api::channel::PayloadReceiverHelperMethods;
23 use batch::{BatchKey, BatchKind, BatchTextures, BrushBatchKind};
24 use batch::{TransformBatchKind};
25 #[cfg(any(feature = "capture", feature = "replay"))]
26 use capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
27 use debug_colors;
28 use debug_render::DebugRenderer;
29 #[cfg(feature = "debugger")]
30 use debug_server::{self, DebugServer};
31 use device::{DepthFunction, Device, FrameId, Program, UploadMethod, Texture,
32 VertexDescriptor, PBO};
33 use device::{ExternalTexture, FBOId, TextureSlot, VertexAttribute, VertexAttributeKind};
34 use device::{FileWatcherHandler, ShaderError, TextureFilter,
35 VertexUsageHint, VAO, VBO, CustomVAO};
36 use device::{ProgramCache, ReadPixelsFormat};
37 use euclid::{rect, Transform3D};
38 use frame_builder::FrameBuilderConfig;
39 use gleam::gl;
40 use glyph_rasterizer::GlyphFormat;
41 use gpu_cache::{GpuBlockData, GpuCacheUpdate, GpuCacheUpdateList};
42 use gpu_types::PrimitiveInstance;
43 use internal_types::{SourceTexture, ORTHO_FAR_PLANE, ORTHO_NEAR_PLANE, ResourceCacheError};
44 use internal_types::{CacheTextureId, DebugOutput, FastHashMap, RenderedDocument, ResultMsg};
45 use internal_types::{TextureUpdateList, TextureUpdateOp, TextureUpdateSource};
46 use internal_types::{RenderTargetInfo, SavedTargetIndex};
47 use picture::ContentOrigin;
48 use prim_store::DeferredResolve;
49 use profiler::{BackendProfileCounters, FrameProfileCounters, Profiler};
50 use profiler::{GpuProfileTag, RendererProfileCounters, RendererProfileTimers};
51 use query::{GpuProfiler, GpuTimer};
52 use rayon::{ThreadPool, ThreadPoolBuilder};
53 use record::ApiRecordingReceiver;
54 use render_backend::RenderBackend;
55 use scene_builder::SceneBuilder;
56 use render_task::{RenderTaskKind, RenderTaskTree};
57 use resource_cache::ResourceCache;
58 #[cfg(feature = "debugger")]
59 use serde_json;
60 use std;
61 use std::cmp;
62 use std::collections::VecDeque;
63 use std::collections::hash_map::Entry;
64 use std::f32;
65 use std::mem;
66 use std::path::PathBuf;
67 use std::rc::Rc;
68 use std::sync::Arc;
69 use std::sync::mpsc::{channel, Receiver, Sender};
70 use std::thread;
71 use texture_cache::TextureCache;
72 use thread_profiler::{register_thread_with_profiler, write_profile};
73 use tiling::{AlphaRenderTarget, ColorRenderTarget};
74 use tiling::{BlitJob, BlitJobSource, RenderPass, RenderPassKind, RenderTargetList};
75 use tiling::{Frame, RenderTarget, ScalingInfo, TextureCacheRenderTarget};
76 use time::precise_time_ns;
77 use util::TransformedRectKind;
78
/// Width, in GPU blocks, of the vertex-fetch textures (e.g. the GPU cache);
/// rows of cached data wrap at this many blocks.
pub const MAX_VERTEX_TEXTURE_WIDTH: usize = 1024;
/// Enabling this toggle would force the GPU cache scattered texture to
/// be resized every frame, which enables GPU debuggers to see if this
/// is performed correctly.
const GPU_CACHE_RESIZE_TEST: bool = false;

/// Number of GPU blocks per UV rectangle provided for an image.
pub const BLOCKS_PER_UV_RECT: usize = 2;
87
// Tags used to annotate GPU work in the GPU profiler. The `label` string
// shows up in profiler output; `color` is used when drawing the profile
// overlay. "B_" prefixed tags cover brush batches, "C_" tags cover cache
// render tasks.
const GPU_TAG_BRUSH_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_LinearGradient",
    color: debug_colors::POWDERBLUE,
};
const GPU_TAG_BRUSH_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_RadialGradient",
    color: debug_colors::LIGHTPINK,
};
const GPU_TAG_BRUSH_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_YuvImage",
    color: debug_colors::DARKGREEN,
};
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
    label: "B_MixBlend",
    color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
    label: "B_Blend",
    color: debug_colors::LIGHTBLUE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_Image",
    color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
    label: "B_Solid",
    color: debug_colors::RED,
};
const GPU_TAG_BRUSH_LINE: GpuProfileTag = GpuProfileTag {
    label: "Line",
    color: debug_colors::DARKRED,
};
const GPU_TAG_CACHE_CLIP: GpuProfileTag = GpuProfileTag {
    label: "C_Clip",
    color: debug_colors::PURPLE,
};
const GPU_TAG_CACHE_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "C_TextRun",
    color: debug_colors::MISTYROSE,
};
const GPU_TAG_SETUP_TARGET: GpuProfileTag = GpuProfileTag {
    label: "target init",
    color: debug_colors::SLATEGREY,
};
const GPU_TAG_SETUP_DATA: GpuProfileTag = GpuProfileTag {
    label: "data init",
    color: debug_colors::LIGHTGREY,
};
const GPU_TAG_PRIM_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "Image",
    color: debug_colors::GREEN,
};
const GPU_TAG_PRIM_HW_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "HwComposite",
    color: debug_colors::DODGERBLUE,
};
const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "SplitComposite",
    color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "TextRun",
    color: debug_colors::BLUE,
};
const GPU_TAG_PRIM_BORDER_CORNER: GpuProfileTag = GpuProfileTag {
    label: "BorderCorner",
    color: debug_colors::DARKSLATEGREY,
};
const GPU_TAG_PRIM_BORDER_EDGE: GpuProfileTag = GpuProfileTag {
    label: "BorderEdge",
    color: debug_colors::LAVENDER,
};
const GPU_TAG_BLUR: GpuProfileTag = GpuProfileTag {
    label: "Blur",
    color: debug_colors::VIOLET,
};
const GPU_TAG_BLIT: GpuProfileTag = GpuProfileTag {
    label: "Blit",
    color: debug_colors::LIME,
};

// Tags used for whole-pass GPU sampler queries. They all share the same
// color — presumably only the label is meaningful for sampler tags
// (NOTE(review): confirm against the profiler overlay code).
const GPU_SAMPLER_TAG_ALPHA: GpuProfileTag = GpuProfileTag {
    label: "Alpha Targets",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_OPAQUE: GpuProfileTag = GpuProfileTag {
    label: "Opaque Pass",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_TRANSPARENT: GpuProfileTag = GpuProfileTag {
    label: "Transparent Pass",
    color: debug_colors::BLACK,
};
181
impl TransformBatchKind {
    /// Human-readable name of this batch kind, shown by the debug server.
    #[cfg(feature = "debugger")]
    fn debug_name(&self) -> &'static str {
        match *self {
            TransformBatchKind::TextRun(..) => "TextRun",
            TransformBatchKind::Image(image_buffer_kind, ..) => match image_buffer_kind {
                ImageBufferKind::Texture2D => "Image (2D)",
                ImageBufferKind::TextureRect => "Image (Rect)",
                ImageBufferKind::TextureExternal => "Image (External)",
                ImageBufferKind::Texture2DArray => "Image (Array)",
            },
            TransformBatchKind::BorderCorner => "BorderCorner",
            TransformBatchKind::BorderEdge => "BorderEdge",
        }
    }

    /// GPU profiler tag recorded while drawing batches of this kind.
    fn gpu_sampler_tag(&self) -> GpuProfileTag {
        match *self {
            TransformBatchKind::TextRun(..) => GPU_TAG_PRIM_TEXT_RUN,
            TransformBatchKind::Image(..) => GPU_TAG_PRIM_IMAGE,
            TransformBatchKind::BorderCorner => GPU_TAG_PRIM_BORDER_CORNER,
            TransformBatchKind::BorderEdge => GPU_TAG_PRIM_BORDER_EDGE,
        }
    }
}
207
impl BatchKind {
    /// Human-readable name of this batch kind, shown by the debug server.
    #[cfg(feature = "debugger")]
    fn debug_name(&self) -> &'static str {
        match *self {
            BatchKind::HardwareComposite => "HardwareComposite",
            BatchKind::SplitComposite => "SplitComposite",
            BatchKind::Brush(kind) => {
                match kind {
                    BrushBatchKind::Solid => "Brush (Solid)",
                    BrushBatchKind::Line => "Brush (Line)",
                    BrushBatchKind::Image(..) => "Brush (Image)",
                    BrushBatchKind::Blend => "Brush (Blend)",
                    BrushBatchKind::MixBlend { .. } => "Brush (Composite)",
                    BrushBatchKind::YuvImage(..) => "Brush (YuvImage)",
                    BrushBatchKind::RadialGradient => "Brush (RadialGradient)",
                    BrushBatchKind::LinearGradient => "Brush (LinearGradient)",
                }
            }
            // Transformable batches delegate to the inner kind's name.
            BatchKind::Transformable(_, batch_kind) => batch_kind.debug_name(),
        }
    }

    /// GPU profiler tag recorded while drawing batches of this kind.
    fn gpu_sampler_tag(&self) -> GpuProfileTag {
        match *self {
            BatchKind::HardwareComposite => GPU_TAG_PRIM_HW_COMPOSITE,
            BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
            BatchKind::Brush(kind) => {
                match kind {
                    BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
                    BrushBatchKind::Line => GPU_TAG_BRUSH_LINE,
                    BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
                    BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
                    BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
                    BrushBatchKind::YuvImage(..) => GPU_TAG_BRUSH_YUV_IMAGE,
                    BrushBatchKind::RadialGradient => GPU_TAG_BRUSH_RADIAL_GRADIENT,
                    BrushBatchKind::LinearGradient => GPU_TAG_BRUSH_LINEAR_GRADIENT,
                }
            }
            // Transformable batches delegate to the inner kind's tag.
            BatchKind::Transformable(_, batch_kind) => batch_kind.gpu_sampler_tag(),
        }
    }
}
250
bitflags! {
    // Run-time toggles for the renderer's debug overlays and diagnostic
    // behaviour.
    #[derive(Default)]
    pub struct DebugFlags: u32 {
        // Show the profiler overlay.
        const PROFILER_DBG = 1 << 0;
        // Visualize the intermediate render targets on screen.
        const RENDER_TARGET_DBG = 1 << 1;
        // Visualize the texture cache contents on screen.
        const TEXTURE_CACHE_DBG = 1 << 2;
        // Enable GPU time queries.
        const GPU_TIME_QUERIES = 1 << 3;
        // Enable GPU sample queries.
        const GPU_SAMPLE_QUERIES= 1 << 4;
        // Disable batching (debugging aid).
        const DISABLE_BATCHING = 1 << 5;
        // Show the epochs debug display.
        const EPOCHS = 1 << 6;
        // Use the compact form of the profiler overlay.
        const COMPACT_PROFILER = 1 << 7;
    }
}
264
flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool>265 fn flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool> {
266 if before & select != after & select {
267 Some(after.contains(select))
268 } else {
269 None
270 }
271 }
272
// A generic mode that can be passed to shaders to change
// behaviour per draw-call.
type ShaderMode = i32;

/// Modes for the text-run shaders. `repr(C)` and the explicit
/// discriminants keep the numeric values stable, since the value is
/// handed to the shader as a `ShaderMode`.
#[repr(C)]
enum TextShaderMode {
    Alpha = 0,
    SubpixelConstantTextColor = 1,
    SubpixelPass0 = 2,
    SubpixelPass1 = 3,
    SubpixelWithBgColorPass0 = 4,
    SubpixelWithBgColorPass1 = 5,
    SubpixelWithBgColorPass2 = 6,
    SubpixelDualSource = 7,
    Bitmap = 8,
    ColorBitmap = 9,
}
290
291 impl Into<ShaderMode> for TextShaderMode {
into(self) -> i32292 fn into(self) -> i32 {
293 self as i32
294 }
295 }
296
297 impl From<GlyphFormat> for TextShaderMode {
from(format: GlyphFormat) -> TextShaderMode298 fn from(format: GlyphFormat) -> TextShaderMode {
299 match format {
300 GlyphFormat::Alpha | GlyphFormat::TransformedAlpha => TextShaderMode::Alpha,
301 GlyphFormat::Subpixel | GlyphFormat::TransformedSubpixel => {
302 panic!("Subpixel glyph formats must be handled separately.");
303 }
304 GlyphFormat::Bitmap => TextShaderMode::Bitmap,
305 GlyphFormat::ColorBitmap => TextShaderMode::ColorBitmap,
306 }
307 }
308 }
309
/// Logical texture bindings used by the shaders; each maps to a fixed
/// texture unit (see the `Into<TextureSlot>` impl below).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum TextureSampler {
    Color0,
    Color1,
    Color2,
    CacheA8,
    CacheRGBA8,
    ResourceCache,
    ClipScrollNodes,
    RenderTasks,
    Dither,
    // A special sampler that is bound to the A8 output of
    // the *first* pass. Items rendered in this target are
    // available as inputs to tasks in any subsequent pass.
    SharedCacheA8,
    LocalClipRects
}
327
328 impl TextureSampler {
color(n: usize) -> TextureSampler329 fn color(n: usize) -> TextureSampler {
330 match n {
331 0 => TextureSampler::Color0,
332 1 => TextureSampler::Color1,
333 2 => TextureSampler::Color2,
334 _ => {
335 panic!("There are only 3 color samplers.");
336 }
337 }
338 }
339 }
340
341 impl Into<TextureSlot> for TextureSampler {
into(self) -> TextureSlot342 fn into(self) -> TextureSlot {
343 match self {
344 TextureSampler::Color0 => TextureSlot(0),
345 TextureSampler::Color1 => TextureSlot(1),
346 TextureSampler::Color2 => TextureSlot(2),
347 TextureSampler::CacheA8 => TextureSlot(3),
348 TextureSampler::CacheRGBA8 => TextureSlot(4),
349 TextureSampler::ResourceCache => TextureSlot(5),
350 TextureSampler::ClipScrollNodes => TextureSlot(6),
351 TextureSampler::RenderTasks => TextureSlot(7),
352 TextureSampler::Dither => TextureSlot(8),
353 TextureSampler::SharedCacheA8 => TextureSlot(9),
354 TextureSampler::LocalClipRects => TextureSlot(10),
355 }
356 }
357 }
358
/// CPU-side layout of a single quad vertex: a 2D position, matching the
/// two-component f32 `aPosition` attribute of the descriptors below.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct PackedVertex {
    pub pos: [f32; 2],
}
364
/// Vertex layout for instanced primitive draws: a per-vertex quad
/// position plus two 4 x i32 words of per-instance data.
const DESC_PRIM_INSTANCES: VertexDescriptor = VertexDescriptor {
    vertex_attributes: &[
        VertexAttribute {
            name: "aPosition",
            count: 2,
            kind: VertexAttributeKind::F32,
        },
    ],
    instance_attributes: &[
        VertexAttribute {
            name: "aData0",
            count: 4,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aData1",
            count: 4,
            kind: VertexAttributeKind::I32,
        },
    ],
};

/// Vertex layout for blur passes: per-instance render-task addresses
/// and the blur direction.
const DESC_BLUR: VertexDescriptor = VertexDescriptor {
    vertex_attributes: &[
        VertexAttribute {
            name: "aPosition",
            count: 2,
            kind: VertexAttributeKind::F32,
        },
    ],
    instance_attributes: &[
        VertexAttribute {
            name: "aBlurRenderTaskAddress",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aBlurSourceTaskAddress",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aBlurDirection",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
    ],
};

/// Vertex layout for clip-mask rendering.
const DESC_CLIP: VertexDescriptor = VertexDescriptor {
    vertex_attributes: &[
        VertexAttribute {
            name: "aPosition",
            count: 2,
            kind: VertexAttributeKind::F32,
        },
    ],
    instance_attributes: &[
        VertexAttribute {
            name: "aClipRenderTaskAddress",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aScrollNodeId",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aClipSegment",
            count: 1,
            kind: VertexAttributeKind::I32,
        },
        VertexAttribute {
            name: "aClipDataResourceAddress",
            count: 4,
            kind: VertexAttributeKind::U16,
        },
    ],
};

/// Vertex layout for the scattered GPU-cache update path: a normalized
/// u16 target position plus the block value to write. Not instanced —
/// both attributes are per-vertex (see `CacheTexture::new`).
const DESC_GPU_CACHE_UPDATE: VertexDescriptor = VertexDescriptor {
    vertex_attributes: &[
        VertexAttribute {
            name: "aPosition",
            count: 2,
            kind: VertexAttributeKind::U16Norm,
        },
        VertexAttribute {
            name: "aValue",
            count: 4,
            kind: VertexAttributeKind::F32,
        },
    ],
    instance_attributes: &[],
};
461
/// Which VAO (and therefore which vertex descriptor above) a draw uses.
#[derive(Debug, Copy, Clone)]
enum VertexArrayKind {
    Primitive,
    Blur,
    Clip,
}
468
/// Graphics API backing the renderer (currently only OpenGL).
#[derive(Clone, Debug, PartialEq)]
pub enum GraphicsApi {
    OpenGL,
}

/// Description of the graphics API/driver, reported to the embedder.
#[derive(Clone, Debug)]
pub struct GraphicsApiInfo {
    pub kind: GraphicsApi,
    /// Driver/renderer string.
    pub renderer: String,
    /// API version string.
    pub version: String,
}
480
/// Kind of GL texture an image is sampled from; selects the shader
/// variant via `get_feature_string`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ImageBufferKind {
    Texture2D = 0,
    TextureRect = 1,
    TextureExternal = 2,
    Texture2DArray = 3,
}
490
491 //TODO: those types are the same, so let's merge them
492 impl From<TextureTarget> for ImageBufferKind {
from(target: TextureTarget) -> Self493 fn from(target: TextureTarget) -> Self {
494 match target {
495 TextureTarget::Default => ImageBufferKind::Texture2D,
496 TextureTarget::Rect => ImageBufferKind::TextureRect,
497 TextureTarget::Array => ImageBufferKind::Texture2DArray,
498 TextureTarget::External => ImageBufferKind::TextureExternal,
499 }
500 }
501 }
502
/// All image buffer kinds, for iterating when building shader variants.
pub const IMAGE_BUFFER_KINDS: [ImageBufferKind; 4] = [
    ImageBufferKind::Texture2D,
    ImageBufferKind::TextureRect,
    ImageBufferKind::TextureExternal,
    ImageBufferKind::Texture2DArray,
];
509
510 impl ImageBufferKind {
get_feature_string(&self) -> &'static str511 pub fn get_feature_string(&self) -> &'static str {
512 match *self {
513 ImageBufferKind::Texture2D => "TEXTURE_2D",
514 ImageBufferKind::Texture2DArray => "",
515 ImageBufferKind::TextureRect => "TEXTURE_RECT",
516 ImageBufferKind::TextureExternal => "TEXTURE_EXTERNAL",
517 }
518 }
519
has_platform_support(&self, gl_type: &gl::GlType) -> bool520 pub fn has_platform_support(&self, gl_type: &gl::GlType) -> bool {
521 match *gl_type {
522 gl::GlType::Gles => match *self {
523 ImageBufferKind::Texture2D => true,
524 ImageBufferKind::Texture2DArray => true,
525 ImageBufferKind::TextureRect => true,
526 ImageBufferKind::TextureExternal => true,
527 },
528 gl::GlType::Gl => match *self {
529 ImageBufferKind::Texture2D => true,
530 ImageBufferKind::Texture2DArray => true,
531 ImageBufferKind::TextureRect => true,
532 ImageBufferKind::TextureExternal => false,
533 },
534 }
535 }
536 }
537
/// Which windowing backend the renderer runs on.
#[derive(Debug, Copy, Clone)]
pub enum RendererKind {
    Native,
    OSMesa,
}

/// GPU-side timing summary for one frame.
#[derive(Debug)]
pub struct GpuProfile {
    pub frame_id: FrameId,
    /// Sum of all GPU timer samples for the frame, in nanoseconds.
    pub paint_time_ns: u64,
}
549
550 impl GpuProfile {
new<T>(frame_id: FrameId, timers: &[GpuTimer<T>]) -> GpuProfile551 fn new<T>(frame_id: FrameId, timers: &[GpuTimer<T>]) -> GpuProfile {
552 let mut paint_time_ns = 0;
553 for timer in timers {
554 paint_time_ns += timer.time_ns;
555 }
556 GpuProfile {
557 frame_id,
558 paint_time_ns,
559 }
560 }
561 }
562
/// CPU-side timing summary for one frame.
#[derive(Debug)]
pub struct CpuProfile {
    pub frame_id: FrameId,
    /// Time spent in the render backend, in nanoseconds.
    pub backend_time_ns: u64,
    /// Time spent compositing/drawing, in nanoseconds.
    pub composite_time_ns: u64,
    /// Number of GL draw calls issued for the frame.
    pub draw_calls: usize,
}
570
571 impl CpuProfile {
new( frame_id: FrameId, backend_time_ns: u64, composite_time_ns: u64, draw_calls: usize, ) -> CpuProfile572 fn new(
573 frame_id: FrameId,
574 backend_time_ns: u64,
575 composite_time_ns: u64,
576 draw_calls: usize,
577 ) -> CpuProfile {
578 CpuProfile {
579 frame_id,
580 backend_time_ns,
581 composite_time_ns,
582 draw_calls,
583 }
584 }
585 }
586
/// A render-target texture currently in flight as a pass cache target.
struct ActiveTexture {
    texture: Texture,
    /// If set, the texture is stashed in `saved_textures` at end of pass
    /// (at exactly this index) instead of being returned to the pool.
    saved_index: Option<SavedTargetIndex>,
    /// True for the alpha texture kept alive across all passes
    /// (routed to `shared_alpha_texture` at end of pass).
    is_shared: bool,
}
592
/// Resolves logical `SourceTexture` handles into concrete device
/// textures for binding, and manages the lifetime/pooling of the
/// intermediate render-target textures across passes.
struct SourceTextureResolver {
    /// A vector for fast resolves of texture cache IDs to
    /// native texture IDs. This maps to a free-list managed
    /// by the backend thread / texture cache. We free the
    /// texture memory associated with a TextureId when its
    /// texture cache ID is freed by the texture cache, but
    /// reuse the TextureId when the texture caches's free
    /// list reuses the texture cache ID. This saves having to
    /// use a hashmap, and allows a flat vector for performance.
    cache_texture_map: Vec<Texture>,

    /// Map of external image IDs to native textures.
    external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,

    /// A special 1x1 dummy cache texture used for shaders that expect to work
    /// with the cache but are actually running in the first pass
    /// when no target is yet provided as a cache texture input.
    dummy_cache_texture: Texture,

    /// The current cache textures.
    cache_rgba8_texture: Option<ActiveTexture>,
    cache_a8_texture: Option<ActiveTexture>,

    /// An alpha texture shared between all passes.
    //TODO: just use the standard texture saving logic instead.
    shared_alpha_texture: Option<Texture>,

    /// Saved cache textures that are to be re-used.
    saved_textures: Vec<Texture>,

    /// General pool of render targets.
    render_target_pool: Vec<Texture>,
}
626
627 impl SourceTextureResolver {
new(device: &mut Device) -> SourceTextureResolver628 fn new(device: &mut Device) -> SourceTextureResolver {
629 let mut dummy_cache_texture = device
630 .create_texture(TextureTarget::Array, ImageFormat::BGRA8);
631 device.init_texture(
632 &mut dummy_cache_texture,
633 1,
634 1,
635 TextureFilter::Linear,
636 None,
637 1,
638 None,
639 );
640
641 SourceTextureResolver {
642 cache_texture_map: Vec::new(),
643 external_images: FastHashMap::default(),
644 dummy_cache_texture,
645 cache_a8_texture: None,
646 cache_rgba8_texture: None,
647 shared_alpha_texture: None,
648 saved_textures: Vec::default(),
649 render_target_pool: Vec::new(),
650 }
651 }
652
deinit(self, device: &mut Device)653 fn deinit(self, device: &mut Device) {
654 device.delete_texture(self.dummy_cache_texture);
655
656 for texture in self.cache_texture_map {
657 device.delete_texture(texture);
658 }
659
660 for texture in self.render_target_pool {
661 device.delete_texture(texture);
662 }
663 }
664
begin_frame(&mut self)665 fn begin_frame(&mut self) {
666 assert!(self.cache_rgba8_texture.is_none());
667 assert!(self.cache_a8_texture.is_none());
668 assert!(self.saved_textures.is_empty());
669 }
670
end_frame(&mut self)671 fn end_frame(&mut self) {
672 // return the cached targets to the pool
673 self.end_pass(None, None);
674 // return the global alpha texture
675 self.render_target_pool.extend(self.shared_alpha_texture.take());
676 // return the saved targets as well
677 self.render_target_pool.extend(self.saved_textures.drain(..));
678 }
679
end_pass( &mut self, a8_texture: Option<ActiveTexture>, rgba8_texture: Option<ActiveTexture>, )680 fn end_pass(
681 &mut self,
682 a8_texture: Option<ActiveTexture>,
683 rgba8_texture: Option<ActiveTexture>,
684 ) {
685 // If we have cache textures from previous pass, return them to the pool.
686 // Also assign the pool index of those cache textures to last pass's index because this is
687 // the result of last pass.
688 // Note: the order here is important, needs to match the logic in `RenderPass::build()`.
689 if let Some(at) = self.cache_rgba8_texture.take() {
690 assert!(!at.is_shared);
691 if let Some(index) = at.saved_index {
692 assert_eq!(self.saved_textures.len(), index.0);
693 self.saved_textures.push(at.texture);
694 } else {
695 self.render_target_pool.push(at.texture);
696 }
697 }
698 if let Some(at) = self.cache_a8_texture.take() {
699 if let Some(index) = at.saved_index {
700 assert!(!at.is_shared);
701 assert_eq!(self.saved_textures.len(), index.0);
702 self.saved_textures.push(at.texture);
703 } else if at.is_shared {
704 assert!(self.shared_alpha_texture.is_none());
705 self.shared_alpha_texture = Some(at.texture);
706 } else {
707 self.render_target_pool.push(at.texture);
708 }
709 }
710
711 // We have another pass to process, make these textures available
712 // as inputs to the next pass.
713 self.cache_rgba8_texture = rgba8_texture;
714 self.cache_a8_texture = a8_texture;
715 }
716
717 // Bind a source texture to the device.
bind(&self, texture_id: &SourceTexture, sampler: TextureSampler, device: &mut Device)718 fn bind(&self, texture_id: &SourceTexture, sampler: TextureSampler, device: &mut Device) {
719 match *texture_id {
720 SourceTexture::Invalid => {}
721 SourceTexture::CacheA8 => {
722 let texture = match self.cache_a8_texture {
723 Some(ref at) => &at.texture,
724 None => &self.dummy_cache_texture,
725 };
726 device.bind_texture(sampler, texture);
727 }
728 SourceTexture::CacheRGBA8 => {
729 let texture = match self.cache_rgba8_texture {
730 Some(ref at) => &at.texture,
731 None => &self.dummy_cache_texture,
732 };
733 device.bind_texture(sampler, texture);
734 }
735 SourceTexture::External(external_image) => {
736 let texture = self.external_images
737 .get(&(external_image.id, external_image.channel_index))
738 .expect(&format!("BUG: External image should be resolved by now"));
739 device.bind_external_texture(sampler, texture);
740 }
741 SourceTexture::TextureCache(index) => {
742 let texture = &self.cache_texture_map[index.0];
743 device.bind_texture(sampler, texture);
744 }
745 SourceTexture::RenderTaskCache(saved_index) => {
746 let texture = &self.saved_textures[saved_index.0];
747 device.bind_texture(sampler, texture)
748 }
749 }
750 }
751
752 // Get the real (OpenGL) texture ID for a given source texture.
753 // For a texture cache texture, the IDs are stored in a vector
754 // map for fast access.
resolve(&self, texture_id: &SourceTexture) -> Option<&Texture>755 fn resolve(&self, texture_id: &SourceTexture) -> Option<&Texture> {
756 match *texture_id {
757 SourceTexture::Invalid => None,
758 SourceTexture::CacheA8 => Some(
759 match self.cache_a8_texture {
760 Some(ref at) => &at.texture,
761 None => &self.dummy_cache_texture,
762 }
763 ),
764 SourceTexture::CacheRGBA8 => Some(
765 match self.cache_rgba8_texture {
766 Some(ref at) => &at.texture,
767 None => &self.dummy_cache_texture,
768 }
769 ),
770 SourceTexture::External(..) => {
771 panic!("BUG: External textures cannot be resolved, they can only be bound.");
772 }
773 SourceTexture::TextureCache(index) => {
774 Some(&self.cache_texture_map[index.0])
775 }
776 SourceTexture::RenderTaskCache(saved_index) => {
777 Some(&self.saved_textures[saved_index.0])
778 }
779 }
780 }
781 }
782
/// GL blend state a batch is drawn with.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[allow(dead_code)] // SubpixelVariableTextColor is not used at the moment.
pub enum BlendMode {
    None,
    Alpha,
    PremultipliedAlpha,
    PremultipliedDestOut,
    SubpixelDualSource,
    SubpixelConstantTextColor(ColorF),
    SubpixelWithBgColor,
    SubpixelVariableTextColor,
}
797
// Tracks the state of each row in the GPU cache texture.
struct CacheRow {
    // True when the row's CPU shadow copy has changes not yet
    // uploaded to the GPU texture.
    is_dirty: bool,
}

impl CacheRow {
    /// A freshly added row starts clean.
    fn new() -> Self {
        CacheRow { is_dirty: false }
    }
}
808
/// The bus over which CPU and GPU versions of the cache
/// get synchronized.
enum CacheBus {
    /// PBO-based updates, currently operate on a row granularity.
    /// Therefore, are subject to fragmentation issues.
    PixelBuffer {
        /// PBO used for transfers.
        buffer: PBO,
        /// Meta-data about the cached rows.
        rows: Vec<CacheRow>,
        /// Mirrored block data on CPU.
        cpu_blocks: Vec<GpuBlockData>,
    },
    /// Shader-based scattering updates. Currently rendered by a set
    /// of points into the GPU texture, each carrying a `GpuBlockData`.
    Scatter {
        /// Special program to run the scattered update.
        program: Program,
        /// VAO containing the source vertex buffers.
        vao: CustomVAO,
        /// VBO for positional data, supplied as normalized `u16`.
        buf_position: VBO<[u16; 2]>,
        /// VBO for gpu block data.
        buf_value: VBO<GpuBlockData>,
        /// Currently stored block count.
        count: usize,
    },
}
837
/// The device-specific representation of the cache texture in gpu_cache.rs
struct CacheTexture {
    /// RGBAF32 texture the shaders fetch GPU cache blocks from.
    texture: Texture,
    /// Transfer mechanism used to push CPU-side updates into `texture`.
    bus: CacheBus,
}
843
844 impl CacheTexture {
    /// Create the cache texture and its CPU-to-GPU transfer bus.
    /// `use_scatter` selects the shader-based scattered-update path
    /// (can fail on program creation); otherwise row-granular PBO
    /// uploads are used.
    fn new(device: &mut Device, use_scatter: bool) -> Result<Self, RendererError> {
        let texture = device.create_texture(TextureTarget::Default, ImageFormat::RGBAF32);

        let bus = if use_scatter {
            let program = device
                .create_program("gpu_cache_update", "", &DESC_GPU_CACHE_UPDATE)?;
            let buf_position = device.create_vbo();
            let buf_value = device.create_vbo();
            //Note: the vertex attributes have to be supplied in the same order
            // as for program creation, but each assigned to a different stream.
            let vao = device.create_custom_vao(&[
                buf_position.stream_with(&DESC_GPU_CACHE_UPDATE.vertex_attributes[0..1]),
                buf_value .stream_with(&DESC_GPU_CACHE_UPDATE.vertex_attributes[1..2]),
            ]);
            CacheBus::Scatter {
                program,
                vao,
                buf_position,
                buf_value,
                count: 0,
            }
        } else {
            let buffer = device.create_pbo();
            CacheBus::PixelBuffer {
                buffer,
                rows: Vec::new(),
                cpu_blocks: Vec::new(),
            }
        };

        Ok(CacheTexture {
            texture,
            bus,
        })
    }
880
    /// Release the texture and all bus-owned GPU resources.
    fn deinit(self, device: &mut Device) {
        device.delete_texture(self.texture);
        match self.bus {
            CacheBus::PixelBuffer { buffer, ..} => {
                device.delete_pbo(buffer);
            }
            CacheBus::Scatter { program, vao, buf_position, buf_value, ..} => {
                device.delete_program(program);
                device.delete_custom_vao(vao);
                device.delete_vbo(buf_position);
                device.delete_vbo(buf_value);
            }
        }
    }
895
    /// Current height of the cache texture, in rows.
    fn get_height(&self) -> u32 {
        self.texture.get_dimensions().height
    }
899
    /// Grow the cache texture (and, for the scatter path, its VBOs) to fit
    /// the pending update batch before `update`/`flush` are called.
    fn prepare_for_updates(
        &mut self,
        device: &mut Device,
        total_block_count: usize,
        max_height: u32,
    ) {
        // See if we need to create or resize the texture.
        let old_size = self.texture.get_dimensions();
        let new_size = DeviceUintSize::new(MAX_VERTEX_TEXTURE_WIDTH as _, max_height);

        match self.bus {
            CacheBus::PixelBuffer { ref mut rows, .. } => {
                if max_height > old_size.height {
                    // Create a f32 texture that can be used for the vertex shader
                    // to fetch data from.
                    device.init_texture(
                        &mut self.texture,
                        new_size.width,
                        new_size.height,
                        TextureFilter::Nearest,
                        None,
                        1,
                        None,
                    );

                    // If we had to resize the texture, just mark all rows
                    // as dirty so they will be uploaded to the texture
                    // during the next flush.
                    for row in rows.iter_mut() {
                        row.is_dirty = true;
                    }
                }
            }
            CacheBus::Scatter {
                ref mut buf_position,
                ref mut buf_value,
                ref mut count,
                ..
            } => {
                // Reset the per-batch block counter; `update` refills it.
                *count = 0;
                if total_block_count > buf_value.allocated_count() {
                    device.allocate_vbo(buf_position, total_block_count, VertexUsageHint::Stream);
                    device.allocate_vbo(buf_value, total_block_count, VertexUsageHint::Stream);
                }

                if new_size.height > old_size.height || GPU_CACHE_RESIZE_TEST {
                    // Resizing preserves contents; a fresh init is only
                    // valid when there is nothing to preserve yet.
                    if old_size.height > 0 {
                        device.resize_renderable_texture(&mut self.texture, new_size);
                    } else {
                        device.init_texture(
                            &mut self.texture,
                            new_size.width,
                            new_size.height,
                            TextureFilter::Nearest,
                            Some(RenderTargetInfo {
                                has_depth: false,
                            }),
                            1,
                            None,
                        );
                    }
                }
            }
        }
    }
965
    /// Applies a list of GPU cache updates to the CPU-side staging state.
    ///
    /// For the `PixelBuffer` bus this patches the CPU shadow copy of the
    /// cache texture and marks the touched rows dirty; the actual GPU
    /// upload is deferred to `flush`. For the `Scatter` bus this fills the
    /// position/value VBOs that the scatter program consumes in `flush`.
    fn update(&mut self, device: &mut Device, updates: &GpuCacheUpdateList) {
        match self.bus {
            CacheBus::PixelBuffer { ref mut rows, ref mut cpu_blocks, .. } => {
                for update in &updates.updates {
                    match update {
                        &GpuCacheUpdate::Copy {
                            block_index,
                            block_count,
                            address,
                        } => {
                            let row = address.v as usize;

                            // Ensure that the CPU-side shadow copy of the GPU cache data has enough
                            // rows to apply this patch.
                            while rows.len() <= row {
                                // Add a new row.
                                rows.push(CacheRow::new());
                                // Add enough GPU blocks for this row.
                                cpu_blocks
                                    .extend_from_slice(&[GpuBlockData::EMPTY; MAX_VERTEX_TEXTURE_WIDTH]);
                            }

                            // This row is dirty (needs to be updated in GPU texture).
                            rows[row].is_dirty = true;

                            // Copy the blocks from the patch array in the shadow CPU copy.
                            let block_offset = row * MAX_VERTEX_TEXTURE_WIDTH + address.u as usize;
                            let data = &mut cpu_blocks[block_offset .. (block_offset + block_count)];
                            for i in 0 .. block_count {
                                data[i] = updates.blocks[block_index + i];
                            }
                        }
                    }
                }
            }
            CacheBus::Scatter {
                ref buf_position,
                ref buf_value,
                ref mut count,
                ..
            } => {
                //TODO: re-use this heap allocation
                // Unused positions will be left as 0xFFFF, which translates to
                // (1.0, 1.0) in the vertex output position and gets culled out
                let mut position_data = vec![[!0u16; 2]; updates.blocks.len()];
                let size = self.texture.get_dimensions().to_usize();

                for update in &updates.updates {
                    match update {
                        &GpuCacheUpdate::Copy {
                            block_index,
                            block_count,
                            address,
                        } => {
                            // Convert the absolute texel position into normalized
                            // 0.16 fixed point at the texel center:
                            // (2*v + 1) * 2^15 / height == ((v + 0.5) / height) * 2^16.
                            let y = ((2*address.v as usize + 1) << 15) / size.height;
                            for i in 0 .. block_count {
                                let x = ((2*address.u as usize + 2*i + 1) << 15) / size.width;
                                position_data[block_index + i] = [x as _, y as _];
                            }
                        }
                    }
                }

                // Append the new blocks after the `count` entries already
                // queued from earlier update() calls this frame.
                device.fill_vbo(buf_value, &updates.blocks, *count);
                device.fill_vbo(buf_position, &position_data, *count);
                *count += position_data.len();
            }
        }
    }
1036
    /// Pushes any pending updates into the GPU cache texture.
    ///
    /// For the `PixelBuffer` bus, uploads every dirty row of the CPU shadow
    /// copy and returns the number of rows uploaded. For the `Scatter` bus,
    /// runs the scatter program over the queued points and returns 0.
    fn flush(&mut self, device: &mut Device) -> usize {
        match self.bus {
            CacheBus::PixelBuffer { ref buffer, ref mut rows, ref cpu_blocks } => {
                let rows_dirty = rows
                    .iter()
                    .filter(|row| row.is_dirty)
                    .count();
                if rows_dirty == 0 {
                    return 0
                }

                // Reserve staging space for all dirty rows up front; each
                // dirty row uploads a full texture row of blocks.
                let mut uploader = device.upload_texture(
                    &self.texture,
                    buffer,
                    rows_dirty * MAX_VERTEX_TEXTURE_WIDTH,
                );

                for (row_index, row) in rows.iter_mut().enumerate() {
                    if !row.is_dirty {
                        continue;
                    }

                    // Upload one full row of blocks from the shadow copy.
                    let block_index = row_index * MAX_VERTEX_TEXTURE_WIDTH;
                    let cpu_blocks =
                        &cpu_blocks[block_index .. (block_index + MAX_VERTEX_TEXTURE_WIDTH)];
                    let rect = DeviceUintRect::new(
                        DeviceUintPoint::new(0, row_index as u32),
                        DeviceUintSize::new(MAX_VERTEX_TEXTURE_WIDTH as u32, 1),
                    );

                    uploader.upload(rect, 0, None, cpu_blocks);

                    row.is_dirty = false;
                }

                rows_dirty
            }
            CacheBus::Scatter { ref program, ref vao, count, .. } => {
                // Each point scatters one GPU block into the cache texture
                // at the position precomputed in update().
                device.disable_depth();
                device.set_blend(false);
                device.bind_program(program);
                device.bind_custom_vao(vao);
                device.bind_draw_target(
                    Some((&self.texture, 0)),
                    Some(self.texture.get_dimensions()),
                );
                device.draw_nonindexed_points(0, count as _);
                0
            }
        }
    }
1088 }
1089
/// A texture (plus staging PBO) used to feed per-frame tabular data to the
/// shaders. The `Renderer` keeps one of these for node data, local clip
/// rects and render task data.
struct VertexDataTexture {
    texture: Texture,
    // Staging pixel buffer object used for the texture uploads.
    pbo: PBO,
}
1094
impl VertexDataTexture {
    /// Creates an (initially zero-sized) RGBAF32 texture and its upload PBO.
    fn new(device: &mut Device) -> VertexDataTexture {
        let texture = device.create_texture(TextureTarget::Default, ImageFormat::RGBAF32);
        let pbo = device.create_pbo();

        VertexDataTexture { texture, pbo }
    }

    /// Uploads `data` into the texture, growing the texture if needed.
    ///
    /// `T` must be a multiple of 16 bytes in size (one RGBAF32 texel); the
    /// data is laid out row-major, `items_per_row` items per texture row.
    /// Note: `data` may be padded (mutated) to fill the final row.
    fn update<T>(&mut self, device: &mut Device, data: &mut Vec<T>) {
        if data.is_empty() {
            return;
        }

        debug_assert!(mem::size_of::<T>() % 16 == 0);
        let texels_per_item = mem::size_of::<T>() / 16;
        let items_per_row = MAX_VERTEX_TEXTURE_WIDTH / texels_per_item;

        // Extend the data array to be a multiple of the row size.
        // This ensures memory safety when the array is passed to
        // OpenGL to upload to the GPU.
        // NOTE(review): `mem::uninitialized` is unsound in general; this
        // relies on T being plain-old-data whose padding values are never
        // read on the CPU side — consider zero-initialized padding instead.
        if items_per_row != 0 {
            while data.len() % items_per_row != 0 {
                data.push(unsafe { mem::uninitialized() });
            }
        }

        let width =
            (MAX_VERTEX_TEXTURE_WIDTH - (MAX_VERTEX_TEXTURE_WIDTH % texels_per_item)) as u32;
        // NOTE(review): divides by items_per_row; assumes a single item never
        // exceeds MAX_VERTEX_TEXTURE_WIDTH texels (items_per_row > 0).
        let needed_height = (data.len() / items_per_row) as u32;

        // Determine if the texture needs to be resized.
        let texture_size = self.texture.get_dimensions();

        if needed_height > texture_size.height {
            // Round the height up to a multiple of 128 to amortize resizes.
            let new_height = (needed_height + 127) & !127;

            device.init_texture(
                &mut self.texture,
                width,
                new_height,
                TextureFilter::Nearest,
                None,
                1,
                None,
            );
        }

        let rect = DeviceUintRect::new(
            DeviceUintPoint::zero(),
            DeviceUintSize::new(width, needed_height),
        );
        device
            .upload_texture(&self.texture, &self.pbo, 0)
            .upload(rect, 0, None, data);
    }

    /// Releases the GPU resources owned by this texture.
    fn deinit(self, device: &mut Device) {
        device.delete_pbo(self.pbo);
        device.delete_texture(self.texture);
    }
}
1156
/// Shader feature enabling the fully-transformed (non-axis-aligned) path.
const TRANSFORM_FEATURE: &str = "TRANSFORM";
/// Shader feature used by the alpha-pass variant of brush shaders.
const ALPHA_FEATURE: &str = "ALPHA_PASS";
1159
/// Determines how a shader program is created and which vertex format it
/// consumes (see `LazilyCompiledShader::get`).
enum ShaderKind {
    // Built via create_prim_shader with the Primitive vertex format.
    Primitive,
    // Built via create_prim_shader with the given vertex array kind.
    Cache(VertexArrayKind),
    // Built via create_clip_shader.
    ClipCache,
    // Same creation path as Primitive.
    Brush,
    // Same creation path as Primitive.
    Text,
}
1167
/// A shader program that is compiled on first use (or eagerly when shader
/// precaching is enabled) rather than at renderer construction time.
struct LazilyCompiledShader {
    // None until the program is first requested via `get`.
    program: Option<Program>,
    name: &'static str,
    kind: ShaderKind,
    // Feature names that become WR_FEATURE_* defines at compile time.
    features: Vec<&'static str>,
}
1174
1175 impl LazilyCompiledShader {
new( kind: ShaderKind, name: &'static str, features: &[&'static str], device: &mut Device, precache: bool, ) -> Result<LazilyCompiledShader, ShaderError>1176 fn new(
1177 kind: ShaderKind,
1178 name: &'static str,
1179 features: &[&'static str],
1180 device: &mut Device,
1181 precache: bool,
1182 ) -> Result<LazilyCompiledShader, ShaderError> {
1183 let mut shader = LazilyCompiledShader {
1184 program: None,
1185 name,
1186 kind,
1187 features: features.to_vec(),
1188 };
1189
1190 if precache {
1191 let t0 = precise_time_ns();
1192 let program = try!{ shader.get(device) };
1193 let t1 = precise_time_ns();
1194 device.bind_program(program);
1195 device.draw_triangles_u16(0, 3);
1196 let t2 = precise_time_ns();
1197 debug!("[C: {:.1} ms D: {:.1} ms] Precache {} {:?}",
1198 (t1 - t0) as f64 / 1000000.0,
1199 (t2 - t1) as f64 / 1000000.0,
1200 name,
1201 features
1202 );
1203 }
1204
1205 Ok(shader)
1206 }
1207
bind<M>( &mut self, device: &mut Device, projection: &Transform3D<f32>, mode: M, renderer_errors: &mut Vec<RendererError>, ) where M: Into<ShaderMode>1208 fn bind<M>(
1209 &mut self,
1210 device: &mut Device,
1211 projection: &Transform3D<f32>,
1212 mode: M,
1213 renderer_errors: &mut Vec<RendererError>,
1214 ) where M: Into<ShaderMode> {
1215 let program = match self.get(device) {
1216 Ok(program) => program,
1217 Err(e) => {
1218 renderer_errors.push(RendererError::from(e));
1219 return;
1220 }
1221 };
1222 device.bind_program(program);
1223 device.set_uniforms(program, projection, mode.into());
1224 }
1225
get(&mut self, device: &mut Device) -> Result<&Program, ShaderError>1226 fn get(&mut self, device: &mut Device) -> Result<&Program, ShaderError> {
1227 if self.program.is_none() {
1228 let program = try!{
1229 match self.kind {
1230 ShaderKind::Primitive | ShaderKind::Brush | ShaderKind::Text => {
1231 create_prim_shader(self.name,
1232 device,
1233 &self.features,
1234 VertexArrayKind::Primitive)
1235 }
1236 ShaderKind::Cache(format) => {
1237 create_prim_shader(self.name,
1238 device,
1239 &self.features,
1240 format)
1241 }
1242 ShaderKind::ClipCache => {
1243 create_clip_shader(self.name, device)
1244 }
1245 }
1246 };
1247 self.program = Some(program);
1248 }
1249
1250 Ok(self.program.as_ref().unwrap())
1251 }
1252
deinit(self, device: &mut Device)1253 fn deinit(self, device: &mut Device) {
1254 if let Some(program) = self.program {
1255 device.delete_program(program);
1256 }
1257 }
1258 }
1259
// A brush shader supports two modes:
// opaque:
//   Used for completely opaque primitives,
//   or inside segments of partially
//   opaque primitives. Assumes no need
//   for clip masks, AA etc.
// alpha:
//   Used for brush primitives in the alpha
//   pass. Assumes that AA should be applied
//   along the primitive edge, and also that
//   clip mask is present.
struct BrushShader {
    // Compiled without the ALPHA_PASS feature.
    opaque: LazilyCompiledShader,
    // Same shader with the ALPHA_PASS feature enabled.
    alpha: LazilyCompiledShader,
}
1275
1276 impl BrushShader {
new( name: &'static str, device: &mut Device, features: &[&'static str], precache: bool, ) -> Result<Self, ShaderError>1277 fn new(
1278 name: &'static str,
1279 device: &mut Device,
1280 features: &[&'static str],
1281 precache: bool,
1282 ) -> Result<Self, ShaderError> {
1283 let opaque = try!{
1284 LazilyCompiledShader::new(ShaderKind::Brush,
1285 name,
1286 features,
1287 device,
1288 precache)
1289 };
1290
1291 let mut alpha_features = features.to_vec();
1292 alpha_features.push(ALPHA_FEATURE);
1293
1294 let alpha = try!{
1295 LazilyCompiledShader::new(ShaderKind::Brush,
1296 name,
1297 &alpha_features,
1298 device,
1299 precache)
1300 };
1301
1302 Ok(BrushShader { opaque, alpha })
1303 }
1304
bind<M>( &mut self, device: &mut Device, blend_mode: BlendMode, projection: &Transform3D<f32>, mode: M, renderer_errors: &mut Vec<RendererError>, ) where M: Into<ShaderMode>1305 fn bind<M>(
1306 &mut self,
1307 device: &mut Device,
1308 blend_mode: BlendMode,
1309 projection: &Transform3D<f32>,
1310 mode: M,
1311 renderer_errors: &mut Vec<RendererError>,
1312 ) where M: Into<ShaderMode> {
1313 match blend_mode {
1314 BlendMode::None => {
1315 self.opaque.bind(device, projection, mode, renderer_errors)
1316 }
1317 BlendMode::Alpha |
1318 BlendMode::PremultipliedAlpha |
1319 BlendMode::PremultipliedDestOut |
1320 BlendMode::SubpixelDualSource |
1321 BlendMode::SubpixelConstantTextColor(..) |
1322 BlendMode::SubpixelVariableTextColor |
1323 BlendMode::SubpixelWithBgColor => {
1324 self.alpha.bind(device, projection, mode, renderer_errors)
1325 }
1326 }
1327 }
1328
deinit(self, device: &mut Device)1329 fn deinit(self, device: &mut Device) {
1330 self.opaque.deinit(device);
1331 self.alpha.deinit(device);
1332 }
1333 }
1334
/// A primitive shader with an axis-aligned fast path and a fully
/// transformed fallback, selected per batch in `bind`.
struct PrimitiveShader {
    // Variant used for axis-aligned rects.
    simple: LazilyCompiledShader,
    // Variant compiled with the TRANSFORM feature, for complex transforms.
    transform: LazilyCompiledShader,
}
1339
1340 impl PrimitiveShader {
new( name: &'static str, device: &mut Device, features: &[&'static str], precache: bool, ) -> Result<Self, ShaderError>1341 fn new(
1342 name: &'static str,
1343 device: &mut Device,
1344 features: &[&'static str],
1345 precache: bool,
1346 ) -> Result<Self, ShaderError> {
1347 let simple = try!{
1348 LazilyCompiledShader::new(ShaderKind::Primitive,
1349 name,
1350 features,
1351 device,
1352 precache)
1353 };
1354
1355 let mut transform_features = features.to_vec();
1356 transform_features.push(TRANSFORM_FEATURE);
1357
1358 let transform = try!{
1359 LazilyCompiledShader::new(ShaderKind::Primitive,
1360 name,
1361 &transform_features,
1362 device,
1363 precache)
1364 };
1365
1366 Ok(PrimitiveShader { simple, transform })
1367 }
1368
bind<M>( &mut self, device: &mut Device, transform_kind: TransformedRectKind, projection: &Transform3D<f32>, mode: M, renderer_errors: &mut Vec<RendererError>, ) where M: Into<ShaderMode>1369 fn bind<M>(
1370 &mut self,
1371 device: &mut Device,
1372 transform_kind: TransformedRectKind,
1373 projection: &Transform3D<f32>,
1374 mode: M,
1375 renderer_errors: &mut Vec<RendererError>,
1376 ) where M: Into<ShaderMode> {
1377 match transform_kind {
1378 TransformedRectKind::AxisAligned => {
1379 self.simple.bind(device, projection, mode, renderer_errors)
1380 }
1381 TransformedRectKind::Complex => {
1382 self.transform.bind(device, projection, mode, renderer_errors)
1383 }
1384 }
1385 }
1386
deinit(self, device: &mut Device)1387 fn deinit(self, device: &mut Device) {
1388 self.simple.deinit(device);
1389 self.transform.deinit(device);
1390 }
1391 }
1392
/// A text-run shader with three variants, selected per batch in `bind`
/// based on the glyph format and transform kind.
struct TextShader {
    // Axis-aligned fast path.
    simple: LazilyCompiledShader,
    // Compiled with the TRANSFORM feature, for complex transforms.
    transform: LazilyCompiledShader,
    // Compiled with GLYPH_TRANSFORM, for glyphs rasterized in screen space.
    glyph_transform: LazilyCompiledShader,
}
1398
1399 impl TextShader {
new( name: &'static str, device: &mut Device, features: &[&'static str], precache: bool, ) -> Result<Self, ShaderError>1400 fn new(
1401 name: &'static str,
1402 device: &mut Device,
1403 features: &[&'static str],
1404 precache: bool,
1405 ) -> Result<Self, ShaderError> {
1406 let simple = try!{
1407 LazilyCompiledShader::new(ShaderKind::Text,
1408 name,
1409 features,
1410 device,
1411 precache)
1412 };
1413
1414 let mut transform_features = features.to_vec();
1415 transform_features.push("TRANSFORM");
1416
1417 let transform = try!{
1418 LazilyCompiledShader::new(ShaderKind::Text,
1419 name,
1420 &transform_features,
1421 device,
1422 precache)
1423 };
1424
1425 let mut glyph_transform_features = features.to_vec();
1426 glyph_transform_features.push("GLYPH_TRANSFORM");
1427
1428 let glyph_transform = try!{
1429 LazilyCompiledShader::new(ShaderKind::Text,
1430 name,
1431 &glyph_transform_features,
1432 device,
1433 precache)
1434 };
1435
1436 Ok(TextShader { simple, transform, glyph_transform })
1437 }
1438
bind<M>( &mut self, device: &mut Device, glyph_format: GlyphFormat, transform_kind: TransformedRectKind, projection: &Transform3D<f32>, mode: M, renderer_errors: &mut Vec<RendererError>, ) where M: Into<ShaderMode>1439 fn bind<M>(
1440 &mut self,
1441 device: &mut Device,
1442 glyph_format: GlyphFormat,
1443 transform_kind: TransformedRectKind,
1444 projection: &Transform3D<f32>,
1445 mode: M,
1446 renderer_errors: &mut Vec<RendererError>,
1447 ) where M: Into<ShaderMode> {
1448 match glyph_format {
1449 GlyphFormat::Alpha |
1450 GlyphFormat::Subpixel |
1451 GlyphFormat::Bitmap |
1452 GlyphFormat::ColorBitmap => {
1453 match transform_kind {
1454 TransformedRectKind::AxisAligned => {
1455 self.simple.bind(device, projection, mode, renderer_errors)
1456 }
1457 TransformedRectKind::Complex => {
1458 self.transform.bind(device, projection, mode, renderer_errors)
1459 }
1460 }
1461 }
1462 GlyphFormat::TransformedAlpha |
1463 GlyphFormat::TransformedSubpixel => {
1464 self.glyph_transform.bind(device, projection, mode, renderer_errors)
1465 }
1466 }
1467 }
1468
deinit(self, device: &mut Device)1469 fn deinit(self, device: &mut Device) {
1470 self.simple.deinit(device);
1471 self.transform.deinit(device);
1472 self.glyph_transform.deinit(device);
1473 }
1474 }
1475
create_prim_shader( name: &'static str, device: &mut Device, features: &[&'static str], vertex_format: VertexArrayKind, ) -> Result<Program, ShaderError>1476 fn create_prim_shader(
1477 name: &'static str,
1478 device: &mut Device,
1479 features: &[&'static str],
1480 vertex_format: VertexArrayKind,
1481 ) -> Result<Program, ShaderError> {
1482 let mut prefix = format!(
1483 "#define WR_MAX_VERTEX_TEXTURE_WIDTH {}\n",
1484 MAX_VERTEX_TEXTURE_WIDTH
1485 );
1486
1487 for feature in features {
1488 prefix.push_str(&format!("#define WR_FEATURE_{}\n", feature));
1489 }
1490
1491 debug!("PrimShader {}", name);
1492
1493 let vertex_descriptor = match vertex_format {
1494 VertexArrayKind::Primitive => DESC_PRIM_INSTANCES,
1495 VertexArrayKind::Blur => DESC_BLUR,
1496 VertexArrayKind::Clip => DESC_CLIP,
1497 };
1498
1499 let program = device.create_program(name, &prefix, &vertex_descriptor);
1500
1501 if let Ok(ref program) = program {
1502 device.bind_shader_samplers(
1503 program,
1504 &[
1505 ("sColor0", TextureSampler::Color0),
1506 ("sColor1", TextureSampler::Color1),
1507 ("sColor2", TextureSampler::Color2),
1508 ("sDither", TextureSampler::Dither),
1509 ("sCacheA8", TextureSampler::CacheA8),
1510 ("sCacheRGBA8", TextureSampler::CacheRGBA8),
1511 ("sClipScrollNodes", TextureSampler::ClipScrollNodes),
1512 ("sRenderTasks", TextureSampler::RenderTasks),
1513 ("sResourceCache", TextureSampler::ResourceCache),
1514 ("sSharedCacheA8", TextureSampler::SharedCacheA8),
1515 ("sLocalClipRects", TextureSampler::LocalClipRects),
1516 ],
1517 );
1518 }
1519
1520 program
1521 }
1522
/// Compiles and links a clip-mask shader program (always with the
/// TRANSFORM feature), then binds its sampler slots on success.
fn create_clip_shader(name: &'static str, device: &mut Device) -> Result<Program, ShaderError> {
    // NOTE(review): this multi-line literal embeds a raw newline and the
    // source indentation into the prefix, so the TRANSFORM define is
    // preceded by a blank line and leading spaces. GLSL permits whitespace
    // before `#`, but confirm and consider tidying the literal.
    let prefix = format!(
        "#define WR_MAX_VERTEX_TEXTURE_WIDTH {}\n
        #define WR_FEATURE_TRANSFORM\n",
        MAX_VERTEX_TEXTURE_WIDTH
    );

    debug!("ClipShader {}", name);

    let program = device.create_program(name, &prefix, &DESC_CLIP);

    if let Ok(ref program) = program {
        device.bind_shader_samplers(
            program,
            &[
                ("sColor0", TextureSampler::Color0),
                ("sClipScrollNodes", TextureSampler::ClipScrollNodes),
                ("sRenderTasks", TextureSampler::RenderTasks),
                ("sResourceCache", TextureSampler::ResourceCache),
                ("sSharedCacheA8", TextureSampler::SharedCacheA8),
                ("sLocalClipRects", TextureSampler::LocalClipRects),
            ],
        );
    }

    program
}
1550
/// Forwards shader-file change notifications from the device's file
/// watcher to the renderer's result channel.
struct FileWatcher {
    notifier: Box<RenderNotifier>,
    result_tx: Sender<ResultMsg>,
}
1555
1556 impl FileWatcherHandler for FileWatcher {
file_changed(&self, path: PathBuf)1557 fn file_changed(&self, path: PathBuf) {
1558 self.result_tx.send(ResultMsg::RefreshShader(path)).ok();
1559 self.notifier.wake_up();
1560 }
1561 }
1562
/// An FBO allocated for handing rendered output frames to the client
/// (see `Renderer::output_targets`).
struct FrameOutput {
    // Last frame this output was used on; presumably drives recycling of
    // stale outputs — confirm against the allocation code.
    last_access: FrameId,
    fbo_id: FBOId,
}
1567
/// Criteria describing a render target texture; two targets appear to be
/// interchangeable when all three fields compare equal (hence the
/// `PartialEq` derive) — confirm against the target allocation code.
#[derive(PartialEq)]
struct TargetSelector {
    size: DeviceUintSize,
    num_layers: usize,
    format: ImageFormat,
}
1574
1575
/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
pub struct Renderer {
    // Receives results (frames, texture/cache updates, ...) produced by
    // the render backend thread.
    result_rx: Receiver<ResultMsg>,
    debug_server: DebugServer,
    device: Device,
    pending_texture_updates: Vec<TextureUpdateList>,
    pending_gpu_cache_updates: Vec<GpuCacheUpdateList>,
    // Shader files reported changed on disk, pending recompilation.
    pending_shader_updates: Vec<PathBuf>,
    active_documents: Vec<(DocumentId, RenderedDocument)>,

    // These are "cache shaders". These shaders are used to
    // draw intermediate results to cache targets. The results
    // of these shaders are then used by the primitive shaders.
    cs_text_run: LazilyCompiledShader,
    cs_blur_a8: LazilyCompiledShader,
    cs_blur_rgba8: LazilyCompiledShader,

    // Brush shaders
    brush_solid: BrushShader,
    brush_line: BrushShader,
    // Indexed by image buffer kind; None for kinds the platform
    // does not support.
    brush_image: Vec<Option<BrushShader>>,
    brush_blend: BrushShader,
    brush_mix_blend: BrushShader,
    // Indexed by (buffer kind, YUV format, color space); see
    // get_yuv_shader_index.
    brush_yuv_image: Vec<Option<BrushShader>>,
    brush_radial_gradient: BrushShader,
    brush_linear_gradient: BrushShader,

    /// These are "cache clip shaders". These shaders are used to
    /// draw clip instances into the cached clip mask. The results
    /// of these shaders are also used by the primitive shaders.
    cs_clip_rectangle: LazilyCompiledShader,
    cs_clip_box_shadow: LazilyCompiledShader,
    cs_clip_image: LazilyCompiledShader,
    cs_clip_border: LazilyCompiledShader,

    // These are "primitive shaders". These shaders draw and blend
    // final results on screen. They are aware of tile boundaries.
    // Most draw directly to the framebuffer, but some use inputs
    // from the cache shaders to draw. Specifically, the box
    // shadow primitive shader stretches the box shadow cache
    // output, and the cache_image shader blits the results of
    // a cache shader (e.g. blur) to the screen.
    ps_text_run: TextShader,
    ps_text_run_dual_source: TextShader,
    // Indexed by image buffer kind, like brush_image above.
    ps_image: Vec<Option<PrimitiveShader>>,
    ps_border_corner: PrimitiveShader,
    ps_border_edge: PrimitiveShader,

    ps_hw_composite: LazilyCompiledShader,
    ps_split_composite: LazilyCompiledShader,

    // Effective max texture size: the device limit, clamped by the
    // options and the texture cache (see Renderer::new).
    max_texture_size: u32,

    max_recorded_profiles: usize,
    clear_color: Option<ColorF>,
    enable_clear_scissor: bool,
    debug: DebugRenderer,
    debug_flags: DebugFlags,
    backend_profile_counters: BackendProfileCounters,
    profile_counters: RendererProfileCounters,
    profiler: Profiler,
    last_time: u64,

    gpu_profile: GpuProfiler<GpuProfileTag>,
    prim_vao: VAO,
    blur_vao: VAO,
    clip_vao: VAO,

    node_data_texture: VertexDataTexture,
    local_clip_rects_texture: VertexDataTexture,
    render_task_texture: VertexDataTexture,
    gpu_cache_texture: CacheTexture,

    gpu_cache_frame_id: FrameId,
    gpu_cache_overflow: bool,

    pipeline_info: PipelineInfo,

    // Manages and resolves source textures IDs to real texture IDs.
    texture_resolver: SourceTextureResolver,

    // A PBO used to do asynchronous texture cache uploads.
    texture_cache_upload_pbo: PBO,

    // 8x8 ordered-dither matrix texture; Some only when dithering is
    // enabled in the options (see Renderer::new).
    dither_matrix_texture: Option<Texture>,

    /// Optional trait object that allows the client
    /// application to provide external buffers for image data.
    external_image_handler: Option<Box<ExternalImageHandler>>,

    /// Optional trait object that allows the client
    /// application to provide a texture handle to
    /// copy the WR output to.
    output_image_handler: Option<Box<OutputImageHandler>>,

    // Currently allocated FBOs for output frames.
    output_targets: FastHashMap<u32, FrameOutput>,

    // Errors accumulated during rendering (e.g. shader compilation
    // failures recorded by LazilyCompiledShader::bind).
    renderer_errors: Vec<RendererError>,

    /// List of profile results from previous frames. Can be retrieved
    /// via get_frame_profiles().
    cpu_profiles: VecDeque<CpuProfile>,
    gpu_profiles: VecDeque<GpuProfile>,

    #[cfg(feature = "capture")]
    read_fbo: FBOId,
    #[cfg(feature = "replay")]
    owned_external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,
}
1687
/// Errors produced while constructing or running the renderer.
#[derive(Debug)]
pub enum RendererError {
    // Shader compile/link failure.
    Shader(ShaderError),
    // An I/O error, presumably from thread/channel setup — see the
    // From<std::io::Error> impl below.
    Thread(std::io::Error),
    Resource(ResourceCacheError),
    // The device reported a max texture size below the minimum (512)
    // that the texture cache can work with.
    MaxTextureSize,
}
1695
// Lets `?` convert shader compilation failures into RendererError.
impl From<ShaderError> for RendererError {
    fn from(err: ShaderError) -> Self {
        RendererError::Shader(err)
    }
}
1701
// Lets `?` convert I/O errors (wrapped as Thread) into RendererError.
impl From<std::io::Error> for RendererError {
    fn from(err: std::io::Error) -> Self {
        RendererError::Thread(err)
    }
}
1707
// Lets `?` convert resource cache errors into RendererError.
impl From<ResourceCacheError> for RendererError {
    fn from(err: ResourceCacheError) -> Self {
        RendererError::Resource(err)
    }
}
1713
1714 impl Renderer {
1715 /// Initializes webrender and creates a `Renderer` and `RenderApiSender`.
1716 ///
1717 /// # Examples
1718 /// Initializes a `Renderer` with some reasonable values. For more information see
1719 /// [`RendererOptions`][rendereroptions].
1720 ///
1721 /// ```rust,ignore
1722 /// # use webrender::renderer::Renderer;
1723 /// # use std::path::PathBuf;
1724 /// let opts = webrender::RendererOptions {
1725 /// device_pixel_ratio: 1.0,
1726 /// resource_override_path: None,
1727 /// enable_aa: false,
1728 /// };
1729 /// let (renderer, sender) = Renderer::new(opts);
1730 /// ```
1731 /// [rendereroptions]: struct.RendererOptions.html
new( gl: Rc<gl::Gl>, notifier: Box<RenderNotifier>, mut options: RendererOptions, ) -> Result<(Renderer, RenderApiSender), RendererError>1732 pub fn new(
1733 gl: Rc<gl::Gl>,
1734 notifier: Box<RenderNotifier>,
1735 mut options: RendererOptions,
1736 ) -> Result<(Renderer, RenderApiSender), RendererError> {
1737 let (api_tx, api_rx) = try!{ channel::msg_channel() };
1738 let (payload_tx, payload_rx) = try!{ channel::payload_channel() };
1739 let (result_tx, result_rx) = channel();
1740 let gl_type = gl.get_type();
1741 let dithering_feature = ["DITHERING"];
1742
1743 let debug_server = DebugServer::new(api_tx.clone());
1744
1745 let file_watch_handler = FileWatcher {
1746 result_tx: result_tx.clone(),
1747 notifier: notifier.clone(),
1748 };
1749
1750 let mut device = Device::new(
1751 gl,
1752 options.resource_override_path.clone(),
1753 options.upload_method,
1754 Box::new(file_watch_handler),
1755 options.cached_programs,
1756 );
1757
1758 let ext_dual_source_blending = !options.disable_dual_source_blending &&
1759 device.supports_extension("GL_ARB_blend_func_extended");
1760
1761 let device_max_size = device.max_texture_size();
1762 // 512 is the minimum that the texture cache can work with.
1763 // Broken GL contexts can return a max texture size of zero (See #1260). Better to
1764 // gracefully fail now than panic as soon as a texture is allocated.
1765 let min_texture_size = 512;
1766 if device_max_size < min_texture_size {
1767 println!(
1768 "Device reporting insufficient max texture size ({})",
1769 device_max_size
1770 );
1771 return Err(RendererError::MaxTextureSize);
1772 }
1773 let max_device_size = cmp::max(
1774 cmp::min(
1775 device_max_size,
1776 options.max_texture_size.unwrap_or(device_max_size),
1777 ),
1778 min_texture_size,
1779 );
1780
1781 register_thread_with_profiler("Compositor".to_owned());
1782
1783 device.begin_frame();
1784
1785 let cs_text_run = try!{
1786 LazilyCompiledShader::new(ShaderKind::Cache(VertexArrayKind::Primitive),
1787 "cs_text_run",
1788 &[],
1789 &mut device,
1790 options.precache_shaders)
1791 };
1792
1793 let brush_solid = try!{
1794 BrushShader::new("brush_solid",
1795 &mut device,
1796 &[],
1797 options.precache_shaders)
1798 };
1799
1800 let brush_line = try!{
1801 BrushShader::new("brush_line",
1802 &mut device,
1803 &[],
1804 options.precache_shaders)
1805 };
1806
1807 let brush_blend = try!{
1808 BrushShader::new("brush_blend",
1809 &mut device,
1810 &[],
1811 options.precache_shaders)
1812 };
1813
1814 let brush_mix_blend = try!{
1815 BrushShader::new("brush_mix_blend",
1816 &mut device,
1817 &[],
1818 options.precache_shaders)
1819 };
1820
1821 let brush_radial_gradient = try!{
1822 BrushShader::new("brush_radial_gradient",
1823 &mut device,
1824 if options.enable_dithering {
1825 &dithering_feature
1826 } else {
1827 &[]
1828 },
1829 options.precache_shaders)
1830 };
1831
1832 let brush_linear_gradient = try!{
1833 BrushShader::new("brush_linear_gradient",
1834 &mut device,
1835 if options.enable_dithering {
1836 &dithering_feature
1837 } else {
1838 &[]
1839 },
1840 options.precache_shaders)
1841 };
1842
1843 let cs_blur_a8 = try!{
1844 LazilyCompiledShader::new(ShaderKind::Cache(VertexArrayKind::Blur),
1845 "cs_blur",
1846 &["ALPHA_TARGET"],
1847 &mut device,
1848 options.precache_shaders)
1849 };
1850
1851 let cs_blur_rgba8 = try!{
1852 LazilyCompiledShader::new(ShaderKind::Cache(VertexArrayKind::Blur),
1853 "cs_blur",
1854 &["COLOR_TARGET"],
1855 &mut device,
1856 options.precache_shaders)
1857 };
1858
1859 let cs_clip_rectangle = try!{
1860 LazilyCompiledShader::new(ShaderKind::ClipCache,
1861 "cs_clip_rectangle",
1862 &[],
1863 &mut device,
1864 options.precache_shaders)
1865 };
1866
1867 let cs_clip_box_shadow = try!{
1868 LazilyCompiledShader::new(ShaderKind::ClipCache,
1869 "cs_clip_box_shadow",
1870 &[],
1871 &mut device,
1872 options.precache_shaders)
1873 };
1874
1875 let cs_clip_image = try!{
1876 LazilyCompiledShader::new(ShaderKind::ClipCache,
1877 "cs_clip_image",
1878 &[],
1879 &mut device,
1880 options.precache_shaders)
1881 };
1882
1883 let cs_clip_border = try!{
1884 LazilyCompiledShader::new(ShaderKind::ClipCache,
1885 "cs_clip_border",
1886 &[],
1887 &mut device,
1888 options.precache_shaders)
1889 };
1890
1891 let ps_text_run = try!{
1892 TextShader::new("ps_text_run",
1893 &mut device,
1894 &[],
1895 options.precache_shaders)
1896 };
1897
1898 let ps_text_run_dual_source = try!{
1899 TextShader::new("ps_text_run",
1900 &mut device,
1901 &["DUAL_SOURCE_BLENDING"],
1902 options.precache_shaders)
1903 };
1904
1905 // All image configuration.
1906 let mut image_features = Vec::new();
1907 let mut ps_image = Vec::new();
1908 let mut brush_image = Vec::new();
1909 // PrimitiveShader is not clonable. Use push() to initialize the vec.
1910 for _ in 0 .. IMAGE_BUFFER_KINDS.len() {
1911 ps_image.push(None);
1912 brush_image.push(None);
1913 }
1914 for buffer_kind in 0 .. IMAGE_BUFFER_KINDS.len() {
1915 if IMAGE_BUFFER_KINDS[buffer_kind].has_platform_support(&gl_type) {
1916 let feature_string = IMAGE_BUFFER_KINDS[buffer_kind].get_feature_string();
1917 if feature_string != "" {
1918 image_features.push(feature_string);
1919 }
1920 let shader = try!{
1921 PrimitiveShader::new("ps_image",
1922 &mut device,
1923 &image_features,
1924 options.precache_shaders)
1925 };
1926 ps_image[buffer_kind] = Some(shader);
1927
1928 let shader = try!{
1929 BrushShader::new("brush_image",
1930 &mut device,
1931 &image_features,
1932 options.precache_shaders)
1933 };
1934 brush_image[buffer_kind] = Some(shader);
1935 }
1936 image_features.clear();
1937 }
1938
1939 // All yuv_image configuration.
1940 let mut yuv_features = Vec::new();
1941 let yuv_shader_num = IMAGE_BUFFER_KINDS.len() * YUV_FORMATS.len() * YUV_COLOR_SPACES.len();
1942 let mut brush_yuv_image = Vec::new();
1943 // PrimitiveShader is not clonable. Use push() to initialize the vec.
1944 for _ in 0 .. yuv_shader_num {
1945 brush_yuv_image.push(None);
1946 }
1947 for buffer_kind in 0 .. IMAGE_BUFFER_KINDS.len() {
1948 if IMAGE_BUFFER_KINDS[buffer_kind].has_platform_support(&gl_type) {
1949 for format_kind in 0 .. YUV_FORMATS.len() {
1950 for color_space_kind in 0 .. YUV_COLOR_SPACES.len() {
1951 let feature_string = IMAGE_BUFFER_KINDS[buffer_kind].get_feature_string();
1952 if feature_string != "" {
1953 yuv_features.push(feature_string);
1954 }
1955 let feature_string = YUV_FORMATS[format_kind].get_feature_string();
1956 if feature_string != "" {
1957 yuv_features.push(feature_string);
1958 }
1959 let feature_string =
1960 YUV_COLOR_SPACES[color_space_kind].get_feature_string();
1961 if feature_string != "" {
1962 yuv_features.push(feature_string);
1963 }
1964
1965 let shader = try!{
1966 BrushShader::new("brush_yuv_image",
1967 &mut device,
1968 &yuv_features,
1969 options.precache_shaders)
1970 };
1971 let index = Renderer::get_yuv_shader_index(
1972 IMAGE_BUFFER_KINDS[buffer_kind],
1973 YUV_FORMATS[format_kind],
1974 YUV_COLOR_SPACES[color_space_kind],
1975 );
1976 brush_yuv_image[index] = Some(shader);
1977 yuv_features.clear();
1978 }
1979 }
1980 }
1981 }
1982
1983 let ps_border_corner = try!{
1984 PrimitiveShader::new("ps_border_corner",
1985 &mut device,
1986 &[],
1987 options.precache_shaders)
1988 };
1989
1990 let ps_border_edge = try!{
1991 PrimitiveShader::new("ps_border_edge",
1992 &mut device,
1993 &[],
1994 options.precache_shaders)
1995 };
1996
1997 let ps_hw_composite = try!{
1998 LazilyCompiledShader::new(ShaderKind::Primitive,
1999 "ps_hardware_composite",
2000 &[],
2001 &mut device,
2002 options.precache_shaders)
2003 };
2004
2005 let ps_split_composite = try!{
2006 LazilyCompiledShader::new(ShaderKind::Primitive,
2007 "ps_split_composite",
2008 &[],
2009 &mut device,
2010 options.precache_shaders)
2011 };
2012
2013 let texture_cache = TextureCache::new(max_device_size);
2014 let max_texture_size = texture_cache.max_texture_size();
2015
2016 let backend_profile_counters = BackendProfileCounters::new();
2017
2018 let dither_matrix_texture = if options.enable_dithering {
2019 let dither_matrix: [u8; 64] = [
2020 00,
2021 48,
2022 12,
2023 60,
2024 03,
2025 51,
2026 15,
2027 63,
2028 32,
2029 16,
2030 44,
2031 28,
2032 35,
2033 19,
2034 47,
2035 31,
2036 08,
2037 56,
2038 04,
2039 52,
2040 11,
2041 59,
2042 07,
2043 55,
2044 40,
2045 24,
2046 36,
2047 20,
2048 43,
2049 27,
2050 39,
2051 23,
2052 02,
2053 50,
2054 14,
2055 62,
2056 01,
2057 49,
2058 13,
2059 61,
2060 34,
2061 18,
2062 46,
2063 30,
2064 33,
2065 17,
2066 45,
2067 29,
2068 10,
2069 58,
2070 06,
2071 54,
2072 09,
2073 57,
2074 05,
2075 53,
2076 42,
2077 26,
2078 38,
2079 22,
2080 41,
2081 25,
2082 37,
2083 21,
2084 ];
2085
2086 let mut texture = device
2087 .create_texture(TextureTarget::Default, ImageFormat::R8);
2088 device.init_texture(
2089 &mut texture,
2090 8,
2091 8,
2092 TextureFilter::Nearest,
2093 None,
2094 1,
2095 Some(&dither_matrix),
2096 );
2097
2098 Some(texture)
2099 } else {
2100 None
2101 };
2102
2103 let debug_renderer = DebugRenderer::new(&mut device);
2104
2105 let x0 = 0.0;
2106 let y0 = 0.0;
2107 let x1 = 1.0;
2108 let y1 = 1.0;
2109
2110 let quad_indices: [u16; 6] = [0, 1, 2, 2, 1, 3];
2111 let quad_vertices = [
2112 PackedVertex { pos: [x0, y0] },
2113 PackedVertex { pos: [x1, y0] },
2114 PackedVertex { pos: [x0, y1] },
2115 PackedVertex { pos: [x1, y1] },
2116 ];
2117
2118 let prim_vao = device.create_vao(&DESC_PRIM_INSTANCES);
2119 device.bind_vao(&prim_vao);
2120 device.update_vao_indices(&prim_vao, &quad_indices, VertexUsageHint::Static);
2121 device.update_vao_main_vertices(&prim_vao, &quad_vertices, VertexUsageHint::Static);
2122
2123 let blur_vao = device.create_vao_with_new_instances(&DESC_BLUR, &prim_vao);
2124 let clip_vao = device.create_vao_with_new_instances(&DESC_CLIP, &prim_vao);
2125
2126 let texture_cache_upload_pbo = device.create_pbo();
2127
2128 let texture_resolver = SourceTextureResolver::new(&mut device);
2129
2130 let node_data_texture = VertexDataTexture::new(&mut device);
2131 let local_clip_rects_texture = VertexDataTexture::new(&mut device);
2132 let render_task_texture = VertexDataTexture::new(&mut device);
2133
2134 let gpu_cache_texture = CacheTexture::new(
2135 &mut device,
2136 options.scatter_gpu_cache_updates,
2137 )?;
2138
2139 device.end_frame();
2140
2141 let backend_notifier = notifier.clone();
2142
2143 let default_font_render_mode = match (options.enable_aa, options.enable_subpixel_aa) {
2144 (true, true) => FontRenderMode::Subpixel,
2145 (true, false) => FontRenderMode::Alpha,
2146 (false, _) => FontRenderMode::Mono,
2147 };
2148
2149 let config = FrameBuilderConfig {
2150 enable_scrollbars: options.enable_scrollbars,
2151 default_font_render_mode,
2152 debug: options.debug,
2153 dual_source_blending_is_enabled: true,
2154 dual_source_blending_is_supported: ext_dual_source_blending,
2155 };
2156
2157 let device_pixel_ratio = options.device_pixel_ratio;
2158 // First set the flags to default and later call set_debug_flags to ensure any
2159 // potential transition when enabling a flag is run.
2160 let debug_flags = DebugFlags::default();
2161 let payload_rx_for_backend = payload_rx.to_mpsc_receiver();
2162 let recorder = options.recorder;
2163 let thread_listener = Arc::new(options.thread_listener);
2164 let thread_listener_for_rayon_start = thread_listener.clone();
2165 let thread_listener_for_rayon_end = thread_listener.clone();
2166 let workers = options
2167 .workers
2168 .take()
2169 .unwrap_or_else(|| {
2170 let worker = ThreadPoolBuilder::new()
2171 .thread_name(|idx|{ format!("WRWorker#{}", idx) })
2172 .start_handler(move |idx| {
2173 register_thread_with_profiler(format!("WRWorker#{}", idx));
2174 if let Some(ref thread_listener) = *thread_listener_for_rayon_start {
2175 thread_listener.thread_started(&format!("WRWorker#{}", idx));
2176 }
2177 })
2178 .exit_handler(move |idx| {
2179 if let Some(ref thread_listener) = *thread_listener_for_rayon_end {
2180 thread_listener.thread_stopped(&format!("WRWorker#{}", idx));
2181 }
2182 })
2183 .build();
2184 Arc::new(worker.unwrap())
2185 });
2186 let enable_render_on_scroll = options.enable_render_on_scroll;
2187
2188 let blob_image_renderer = options.blob_image_renderer.take();
2189 let thread_listener_for_render_backend = thread_listener.clone();
2190 let thread_listener_for_scene_builder = thread_listener.clone();
2191 let rb_thread_name = format!("WRRenderBackend#{}", options.renderer_id.unwrap_or(0));
2192 let scene_thread_name = format!("WRSceneBuilder#{}", options.renderer_id.unwrap_or(0));
2193 let resource_cache = ResourceCache::new(
2194 texture_cache,
2195 workers,
2196 blob_image_renderer,
2197 )?;
2198
2199 let (scene_builder, scene_tx, scene_rx) = SceneBuilder::new(config, api_tx.clone());
2200 try! {
2201 thread::Builder::new().name(scene_thread_name.clone()).spawn(move || {
2202 register_thread_with_profiler(scene_thread_name.clone());
2203 if let Some(ref thread_listener) = *thread_listener_for_scene_builder {
2204 thread_listener.thread_started(&scene_thread_name);
2205 }
2206
2207 let mut scene_builder = scene_builder;
2208 scene_builder.run();
2209
2210 if let Some(ref thread_listener) = *thread_listener_for_scene_builder {
2211 thread_listener.thread_stopped(&scene_thread_name);
2212 }
2213 })
2214 };
2215
2216 try!{
2217 thread::Builder::new().name(rb_thread_name.clone()).spawn(move || {
2218 register_thread_with_profiler(rb_thread_name.clone());
2219 if let Some(ref thread_listener) = *thread_listener_for_render_backend {
2220 thread_listener.thread_started(&rb_thread_name);
2221 }
2222 let mut backend = RenderBackend::new(
2223 api_rx,
2224 payload_rx_for_backend,
2225 result_tx,
2226 scene_tx,
2227 scene_rx,
2228 device_pixel_ratio,
2229 resource_cache,
2230 backend_notifier,
2231 config,
2232 recorder,
2233 enable_render_on_scroll,
2234 );
2235 backend.run(backend_profile_counters);
2236 if let Some(ref thread_listener) = *thread_listener_for_render_backend {
2237 thread_listener.thread_stopped(&rb_thread_name);
2238 }
2239 })
2240 };
2241
2242 let gpu_profile = GpuProfiler::new(Rc::clone(device.rc_gl()));
2243 #[cfg(feature = "capture")]
2244 let read_fbo = device.create_fbo_for_external_texture(0);
2245
2246 let mut renderer = Renderer {
2247 result_rx,
2248 debug_server,
2249 device,
2250 active_documents: Vec::new(),
2251 pending_texture_updates: Vec::new(),
2252 pending_gpu_cache_updates: Vec::new(),
2253 pending_shader_updates: Vec::new(),
2254 cs_text_run,
2255 cs_blur_a8,
2256 cs_blur_rgba8,
2257 brush_solid,
2258 brush_line,
2259 brush_image,
2260 brush_blend,
2261 brush_mix_blend,
2262 brush_yuv_image,
2263 brush_radial_gradient,
2264 brush_linear_gradient,
2265 cs_clip_rectangle,
2266 cs_clip_box_shadow,
2267 cs_clip_border,
2268 cs_clip_image,
2269 ps_text_run,
2270 ps_text_run_dual_source,
2271 ps_image,
2272 ps_border_corner,
2273 ps_border_edge,
2274 ps_hw_composite,
2275 ps_split_composite,
2276 debug: debug_renderer,
2277 debug_flags,
2278 backend_profile_counters: BackendProfileCounters::new(),
2279 profile_counters: RendererProfileCounters::new(),
2280 profiler: Profiler::new(),
2281 max_texture_size: max_texture_size,
2282 max_recorded_profiles: options.max_recorded_profiles,
2283 clear_color: options.clear_color,
2284 enable_clear_scissor: options.enable_clear_scissor,
2285 last_time: 0,
2286 gpu_profile,
2287 prim_vao,
2288 blur_vao,
2289 clip_vao,
2290 node_data_texture,
2291 local_clip_rects_texture,
2292 render_task_texture,
2293 pipeline_info: PipelineInfo::default(),
2294 dither_matrix_texture,
2295 external_image_handler: None,
2296 output_image_handler: None,
2297 output_targets: FastHashMap::default(),
2298 cpu_profiles: VecDeque::new(),
2299 gpu_profiles: VecDeque::new(),
2300 gpu_cache_texture,
2301 gpu_cache_frame_id: FrameId::new(0),
2302 gpu_cache_overflow: false,
2303 texture_cache_upload_pbo,
2304 texture_resolver,
2305 renderer_errors: Vec::new(),
2306 #[cfg(feature = "capture")]
2307 read_fbo,
2308 #[cfg(feature = "replay")]
2309 owned_external_images: FastHashMap::default(),
2310 };
2311
2312 renderer.set_debug_flags(options.debug_flags);
2313
2314 let sender = RenderApiSender::new(api_tx, payload_tx);
2315 Ok((renderer, sender))
2316 }
2317
get_max_texture_size(&self) -> u322318 pub fn get_max_texture_size(&self) -> u32 {
2319 self.max_texture_size
2320 }
2321
get_graphics_api_info(&self) -> GraphicsApiInfo2322 pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
2323 GraphicsApiInfo {
2324 kind: GraphicsApi::OpenGL,
2325 version: self.device.gl().get_string(gl::VERSION),
2326 renderer: self.device.gl().get_string(gl::RENDERER),
2327 }
2328 }
2329
get_yuv_shader_index( buffer_kind: ImageBufferKind, format: YuvFormat, color_space: YuvColorSpace, ) -> usize2330 fn get_yuv_shader_index(
2331 buffer_kind: ImageBufferKind,
2332 format: YuvFormat,
2333 color_space: YuvColorSpace,
2334 ) -> usize {
2335 ((buffer_kind as usize) * YUV_FORMATS.len() + (format as usize)) * YUV_COLOR_SPACES.len() +
2336 (color_space as usize)
2337 }
2338
2339 /// Returns the Epoch of the current frame in a pipeline.
current_epoch(&self, pipeline_id: PipelineId) -> Option<Epoch>2340 pub fn current_epoch(&self, pipeline_id: PipelineId) -> Option<Epoch> {
2341 self.pipeline_info.epochs.get(&pipeline_id).cloned()
2342 }
2343
flush_pipeline_info(&mut self) -> PipelineInfo2344 pub fn flush_pipeline_info(&mut self) -> PipelineInfo {
2345 mem::replace(&mut self.pipeline_info, PipelineInfo::default())
2346 }
2347
2348 // update the program cache with new binaries, e.g. when some of the lazy loaded
2349 // shader programs got activated in the mean time
update_program_cache(&mut self, cached_programs: Rc<ProgramCache>)2350 pub fn update_program_cache(&mut self, cached_programs: Rc<ProgramCache>) {
2351 self.device.update_program_cache(cached_programs);
2352 }
2353
    /// Processes the result queue.
    ///
    /// Should be called before `render()`, as texture cache updates are done here.
    ///
    /// Drains every message currently queued from the render backend thread
    /// and folds each into renderer state: published documents, GPU cache
    /// updates, resource updates, shader refreshes, debug output and debug
    /// commands.
    pub fn update(&mut self) {
        profile_scope!("update");
        // Pull any pending results and return the most recent.
        while let Ok(msg) = self.result_rx.try_recv() {
            match msg {
                ResultMsg::PublishDocument(
                    document_id,
                    mut doc,
                    texture_update_list,
                    profile_counters,
                ) => {
                    // Update the list of available epochs for use during reftests.
                    // This is a workaround for https://github.com/servo/servo/issues/13149.
                    for (pipeline_id, epoch) in &doc.pipeline_info.epochs {
                        self.pipeline_info.epochs.insert(*pipeline_id, *epoch);
                    }
                    self.pipeline_info.removed_pipelines.extend(doc.pipeline_info.removed_pipelines.drain(..));

                    // Add a new document to the active set, expressed as a `Vec` in order
                    // to re-order based on `DocumentLayer` during rendering.
                    match self.active_documents.iter().position(|&(id, _)| id == document_id) {
                        Some(pos) => {
                            // If the document we are replacing must be drawn
                            // (in order to update the texture cache), issue
                            // a render just to off-screen targets
                            // (framebuffer_size = None).
                            if self.active_documents[pos].1.frame.must_be_drawn() {
                                self.render_impl(None).ok();
                            }
                            self.active_documents[pos].1 = doc;
                        }
                        None => self.active_documents.push((document_id, doc)),
                    }

                    // IMPORTANT: The pending texture cache updates must be applied
                    //            *after* the previous frame has been rendered above
                    //            (if neceessary for a texture cache update). For
                    //            an example of why this is required:
                    //            1) Previous frame contains a render task that
                    //               targets Texture X.
                    //            2) New frame contains a texture cache update which
                    //               frees Texture X.
                    //            3) bad stuff happens.

                    //TODO: associate `document_id` with target window
                    self.pending_texture_updates.push(texture_update_list);
                    self.backend_profile_counters = profile_counters;
                }
                ResultMsg::UpdateGpuCache(list) => {
                    // Deferred; applied later by `update_gpu_cache()` during render.
                    self.pending_gpu_cache_updates.push(list);
                }
                ResultMsg::UpdateResources {
                    updates,
                    cancel_rendering,
                } => {
                    // Apply resource (texture cache) updates immediately inside
                    // a minimal device frame.
                    self.pending_texture_updates.push(updates);
                    self.device.begin_frame();
                    self.update_texture_cache();
                    self.device.end_frame();
                    // If we receive a `PublishDocument` message followed by this one
                    // within the same update we need to cancel the frame because we
                    // might have deleted the resources in use in the frame due to a
                    // memory pressure event.
                    if cancel_rendering {
                        self.active_documents.clear();
                    }
                }
                ResultMsg::RefreshShader(path) => {
                    // Queue a hot-reload of the shader at `path`.
                    self.pending_shader_updates.push(path);
                }
                ResultMsg::DebugOutput(output) => match output {
                    DebugOutput::FetchDocuments(string) |
                    DebugOutput::FetchClipScrollTree(string) => {
                        self.debug_server.send(string);
                    }
                    #[cfg(feature = "capture")]
                    DebugOutput::SaveCapture(config, deferred) => {
                        self.save_capture(config, deferred);
                    }
                    #[cfg(feature = "replay")]
                    DebugOutput::LoadCapture(root, plain_externals) => {
                        self.active_documents.clear();
                        self.load_capture(root, plain_externals);
                    }
                },
                ResultMsg::DebugCommand(command) => {
                    self.handle_debug_command(command);
                }
            }
        }
    }
2447
2448 #[cfg(not(feature = "debugger"))]
get_screenshot_for_debugger(&mut self) -> String2449 fn get_screenshot_for_debugger(&mut self) -> String {
2450 // Avoid unused param warning.
2451 let _ = &self.debug_server;
2452 String::new()
2453 }
2454
2455
    #[cfg(feature = "debugger")]
    fn get_screenshot_for_debugger(&mut self) -> String {
        use api::ImageDescriptor;

        // NOTE(review): the screenshot size is hard-coded to 1024x768
        // regardless of the actual framebuffer size — confirm this is the
        // intended fixed size for the debugger client.
        let desc = ImageDescriptor::new(1024, 768, ImageFormat::BGRA8, true);
        let data = self.device.read_pixels(&desc);
        let screenshot = debug_server::Screenshot::new(desc.width, desc.height, data);

        // Serialize as JSON for transmission to the debug server client.
        serde_json::to_string(&screenshot).unwrap()
    }
2466
2467 #[cfg(not(feature = "debugger"))]
get_passes_for_debugger(&self) -> String2468 fn get_passes_for_debugger(&self) -> String {
2469 // Avoid unused param warning.
2470 let _ = &self.debug_server;
2471 String::new()
2472 }
2473
2474 #[cfg(feature = "debugger")]
debug_alpha_target(target: &AlphaRenderTarget) -> debug_server::Target2475 fn debug_alpha_target(target: &AlphaRenderTarget) -> debug_server::Target {
2476 let mut debug_target = debug_server::Target::new("A8");
2477
2478 debug_target.add(
2479 debug_server::BatchKind::Cache,
2480 "Scalings",
2481 target.scalings.len(),
2482 );
2483 debug_target.add(
2484 debug_server::BatchKind::Cache,
2485 "Zero Clears",
2486 target.zero_clears.len(),
2487 );
2488 debug_target.add(
2489 debug_server::BatchKind::Clip,
2490 "Clear",
2491 target.clip_batcher.border_clears.len(),
2492 );
2493 debug_target.add(
2494 debug_server::BatchKind::Clip,
2495 "Borders",
2496 target.clip_batcher.borders.len(),
2497 );
2498 debug_target.add(
2499 debug_server::BatchKind::Clip,
2500 "BoxShadows",
2501 target.clip_batcher.box_shadows.len(),
2502 );
2503 debug_target.add(
2504 debug_server::BatchKind::Cache,
2505 "Vertical Blur",
2506 target.vertical_blurs.len(),
2507 );
2508 debug_target.add(
2509 debug_server::BatchKind::Cache,
2510 "Horizontal Blur",
2511 target.horizontal_blurs.len(),
2512 );
2513 debug_target.add(
2514 debug_server::BatchKind::Clip,
2515 "Rectangles",
2516 target.clip_batcher.rectangles.len(),
2517 );
2518 for (_, items) in target.clip_batcher.images.iter() {
2519 debug_target.add(debug_server::BatchKind::Clip, "Image mask", items.len());
2520 }
2521
2522 debug_target
2523 }
2524
2525 #[cfg(feature = "debugger")]
debug_color_target(target: &ColorRenderTarget) -> debug_server::Target2526 fn debug_color_target(target: &ColorRenderTarget) -> debug_server::Target {
2527 let mut debug_target = debug_server::Target::new("RGBA8");
2528
2529 debug_target.add(
2530 debug_server::BatchKind::Cache,
2531 "Scalings",
2532 target.scalings.len(),
2533 );
2534 debug_target.add(
2535 debug_server::BatchKind::Cache,
2536 "Readbacks",
2537 target.readbacks.len(),
2538 );
2539 debug_target.add(
2540 debug_server::BatchKind::Cache,
2541 "Vertical Blur",
2542 target.vertical_blurs.len(),
2543 );
2544 debug_target.add(
2545 debug_server::BatchKind::Cache,
2546 "Horizontal Blur",
2547 target.horizontal_blurs.len(),
2548 );
2549
2550 for alpha_batch_container in &target.alpha_batch_containers {
2551 for (_, batch) in &alpha_batch_container.text_run_cache_prims {
2552 debug_target.add(
2553 debug_server::BatchKind::Cache,
2554 "Text Shadow",
2555 batch.len(),
2556 );
2557 }
2558
2559 for batch in alpha_batch_container
2560 .opaque_batches
2561 .iter()
2562 .rev() {
2563 debug_target.add(
2564 debug_server::BatchKind::Opaque,
2565 batch.key.kind.debug_name(),
2566 batch.instances.len(),
2567 );
2568 }
2569
2570 for batch in &alpha_batch_container
2571 .alpha_batches {
2572 debug_target.add(
2573 debug_server::BatchKind::Alpha,
2574 batch.key.kind.debug_name(),
2575 batch.instances.len(),
2576 );
2577 }
2578 }
2579
2580 debug_target
2581 }
2582
2583 #[cfg(feature = "debugger")]
debug_texture_cache_target(target: &TextureCacheRenderTarget) -> debug_server::Target2584 fn debug_texture_cache_target(target: &TextureCacheRenderTarget) -> debug_server::Target {
2585 let mut debug_target = debug_server::Target::new("Texture Cache");
2586
2587 debug_target.add(
2588 debug_server::BatchKind::Cache,
2589 "Horizontal Blur",
2590 target.horizontal_blurs.len(),
2591 );
2592
2593 debug_target
2594 }
2595
2596 #[cfg(feature = "debugger")]
get_passes_for_debugger(&self) -> String2597 fn get_passes_for_debugger(&self) -> String {
2598 let mut debug_passes = debug_server::PassList::new();
2599
2600 for &(_, ref render_doc) in &self.active_documents {
2601 for pass in &render_doc.frame.passes {
2602 let mut debug_targets = Vec::new();
2603 match pass.kind {
2604 RenderPassKind::MainFramebuffer(ref target) => {
2605 debug_targets.push(Self::debug_color_target(target));
2606 }
2607 RenderPassKind::OffScreen { ref alpha, ref color, ref texture_cache } => {
2608 debug_targets.extend(alpha.targets.iter().map(Self::debug_alpha_target));
2609 debug_targets.extend(color.targets.iter().map(Self::debug_color_target));
2610 debug_targets.extend(texture_cache.iter().map(|(_, target)| Self::debug_texture_cache_target(target)))
2611 }
2612 }
2613
2614 debug_passes.add(debug_server::Pass { targets: debug_targets });
2615 }
2616 }
2617
2618 serde_json::to_string(&debug_passes).unwrap()
2619 }
2620
2621 #[cfg(not(feature = "debugger"))]
get_render_tasks_for_debugger(&self) -> String2622 fn get_render_tasks_for_debugger(&self) -> String {
2623 String::new()
2624 }
2625
2626 #[cfg(feature = "debugger")]
get_render_tasks_for_debugger(&self) -> String2627 fn get_render_tasks_for_debugger(&self) -> String {
2628 let mut debug_root = debug_server::RenderTaskList::new();
2629
2630 for &(_, ref render_doc) in &self.active_documents {
2631 let debug_node = debug_server::TreeNode::new("document render tasks");
2632 let mut builder = debug_server::TreeNodeBuilder::new(debug_node);
2633
2634 let render_tasks = &render_doc.frame.render_tasks;
2635 match render_tasks.tasks.last() {
2636 Some(main_task) => main_task.print_with(&mut builder, render_tasks),
2637 None => continue,
2638 };
2639
2640 debug_root.add(builder.build());
2641 }
2642
2643 serde_json::to_string(&debug_root).unwrap()
2644 }
2645
    /// Dispatches a `DebugCommand` received from the backend.
    ///
    /// Flag toggles go through `set_debug_flag`; `Fetch*` commands reply
    /// over the debug server; capture and dual-source-blending commands
    /// are expected to be intercepted elsewhere and panic if they reach here.
    fn handle_debug_command(&mut self, command: DebugCommand) {
        match command {
            DebugCommand::EnableProfiler(enable) => {
                self.set_debug_flag(DebugFlags::PROFILER_DBG, enable);
            }
            DebugCommand::EnableTextureCacheDebug(enable) => {
                self.set_debug_flag(DebugFlags::TEXTURE_CACHE_DBG, enable);
            }
            DebugCommand::EnableRenderTargetDebug(enable) => {
                self.set_debug_flag(DebugFlags::RENDER_TARGET_DBG, enable);
            }
            DebugCommand::EnableGpuTimeQueries(enable) => {
                self.set_debug_flag(DebugFlags::GPU_TIME_QUERIES, enable);
            }
            DebugCommand::EnableGpuSampleQueries(enable) => {
                self.set_debug_flag(DebugFlags::GPU_SAMPLE_QUERIES, enable);
            }
            DebugCommand::EnableDualSourceBlending(_) => {
                panic!("Should be handled by render backend");
            }
            // These two are fully processed on the backend; nothing to do here.
            DebugCommand::FetchDocuments |
            DebugCommand::FetchClipScrollTree => {}
            DebugCommand::FetchRenderTasks => {
                let json = self.get_render_tasks_for_debugger();
                self.debug_server.send(json);
            }
            DebugCommand::FetchPasses => {
                let json = self.get_passes_for_debugger();
                self.debug_server.send(json);
            }
            DebugCommand::FetchScreenshot => {
                let json = self.get_screenshot_for_debugger();
                self.debug_server.send(json);
            }
            DebugCommand::SaveCapture(..) |
            DebugCommand::LoadCapture(..) => {
                panic!("Capture commands are not welcome here! Did you build with 'capture' feature?")
            }
            DebugCommand::ClearCaches(_) => {}
            DebugCommand::InvalidateGpuCache => {
                match self.gpu_cache_texture.bus {
                    // Mark every row dirty so the whole cache re-uploads.
                    CacheBus::PixelBuffer { ref mut rows, .. } => {
                        info!("Invalidating GPU caches");
                        for row in rows {
                            row.is_dirty = true;
                        }
                    }
                    CacheBus::Scatter { .. } => {
                        warn!("Unable to invalidate scattered GPU cache");
                    }
                }
            }
        }
    }
2700
2701 /// Set a callback for handling external images.
set_external_image_handler(&mut self, handler: Box<ExternalImageHandler>)2702 pub fn set_external_image_handler(&mut self, handler: Box<ExternalImageHandler>) {
2703 self.external_image_handler = Some(handler);
2704 }
2705
2706 /// Set a callback for handling external outputs.
set_output_image_handler(&mut self, handler: Box<OutputImageHandler>)2707 pub fn set_output_image_handler(&mut self, handler: Box<OutputImageHandler>) {
2708 self.output_image_handler = Some(handler);
2709 }
2710
2711 /// Retrieve (and clear) the current list of recorded frame profiles.
get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>)2712 pub fn get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>) {
2713 let cpu_profiles = self.cpu_profiles.drain(..).collect();
2714 let gpu_profiles = self.gpu_profiles.drain(..).collect();
2715 (cpu_profiles, gpu_profiles)
2716 }
2717
2718 /// Returns `true` if the active rendered documents (that need depth buffer)
2719 /// intersect on the main framebuffer, in which case we don't clear
2720 /// the whole depth and instead clear each document area separately.
are_documents_intersecting_depth(&self) -> bool2721 fn are_documents_intersecting_depth(&self) -> bool {
2722 let document_rects = self.active_documents
2723 .iter()
2724 .filter_map(|&(_, ref render_doc)| {
2725 match render_doc.frame.passes.last() {
2726 Some(&RenderPass { kind: RenderPassKind::MainFramebuffer(ref target), .. })
2727 if target.needs_depth() => Some(render_doc.frame.inner_rect),
2728 _ => None,
2729 }
2730 })
2731 .collect::<Vec<_>>();
2732
2733 for (i, rect) in document_rects.iter().enumerate() {
2734 for other in &document_rects[i+1 ..] {
2735 if rect.intersects(other) {
2736 return true
2737 }
2738 }
2739 }
2740
2741 false
2742 }
2743
2744 /// Renders the current frame.
2745 ///
2746 /// A Frame is supplied by calling [`generate_frame()`][genframe].
2747 /// [genframe]: ../../webrender_api/struct.DocumentApi.html#method.generate_frame
render( &mut self, framebuffer_size: DeviceUintSize, ) -> Result<RendererStats, Vec<RendererError>>2748 pub fn render(
2749 &mut self,
2750 framebuffer_size: DeviceUintSize,
2751 ) -> Result<RendererStats, Vec<RendererError>> {
2752 self.render_impl(Some(framebuffer_size))
2753 }
2754
    // If framebuffer_size is None, don't render
    // to the main frame buffer. This is useful
    // to update texture cache render tasks but
    // avoid doing a full frame render.
    //
    // Full frame sequence: build GPU profile samples, begin the device
    // frame, apply texture cache updates, draw each active document (in
    // layer order), record CPU/GPU profiles, optionally draw the profiler
    // overlay, then end the frame. Errors accumulated during the frame are
    // returned to the caller.
    fn render_impl(
        &mut self,
        framebuffer_size: Option<DeviceUintSize>
    ) -> Result<RendererStats, Vec<RendererError>> {
        profile_scope!("render");
        // Nothing to draw: still refresh `last_time` so the next real
        // frame's time delta starts from here.
        if self.active_documents.is_empty() {
            self.last_time = precise_time_ns();
            return Ok(RendererStats::empty());
        }

        let mut stats = RendererStats::empty();
        let mut frame_profiles = Vec::new();
        let mut profile_timers = RendererProfileTimers::new();

        let profile_samplers = {
            let _gm = self.gpu_profile.start_marker("build samples");
            // Block CPU waiting for last frame's GPU profiles to arrive.
            // In general this shouldn't block unless heavily GPU limited.
            let (gpu_frame_id, timers, samplers) = self.gpu_profile.build_samples();

            if self.max_recorded_profiles > 0 {
                // Keep a bounded history of GPU profiles.
                while self.gpu_profiles.len() >= self.max_recorded_profiles {
                    self.gpu_profiles.pop_front();
                }
                self.gpu_profiles
                    .push_back(GpuProfile::new(gpu_frame_id, &timers));
            }
            profile_timers.gpu_samples = timers;
            samplers
        };


        let cpu_frame_id = profile_timers.cpu_time.profile(|| {
            let _gm = self.gpu_profile.start_marker("begin frame");
            let frame_id = self.device.begin_frame();
            self.gpu_profile.begin_frame(frame_id);

            // Reset GL state that the frame drawing code assumes.
            self.device.disable_scissor();
            self.device.disable_depth();
            self.device.set_blend(false);
            //self.update_shaders();

            self.update_texture_cache();

            frame_id
        });

        profile_timers.cpu_time.profile(|| {
            // When document rects overlap we can't share one full-screen
            // depth clear; each document clears its own area instead.
            let clear_depth_value = if self.are_documents_intersecting_depth() {
                None
            } else {
                Some(1.0)
            };

            //Note: another borrowck dance
            let mut active_documents = mem::replace(&mut self.active_documents, Vec::default());
            // sort by the document layer id
            active_documents.sort_by_key(|&(_, ref render_doc)| render_doc.frame.layer);

            // don't clear the framebuffer if one of the rendered documents will overwrite it
            if let Some(framebuffer_size) = framebuffer_size {
                let needs_color_clear = !active_documents
                    .iter()
                    .any(|&(_, RenderedDocument { ref frame, .. })| {
                        frame.background_color.is_some() &&
                        frame.inner_rect.origin == DeviceUintPoint::zero() &&
                        frame.inner_rect.size == framebuffer_size
                    });

                if needs_color_clear || clear_depth_value.is_some() {
                    let clear_color = if needs_color_clear {
                        self.clear_color.map(|color| color.to_array())
                    } else {
                        None
                    };
                    self.device.bind_draw_target(None, None);
                    self.device.enable_depth_write();
                    self.device.clear_target(clear_color, clear_depth_value, None);
                    self.device.disable_depth_write();
                }
            }

            #[cfg(feature = "replay")]
            self.texture_resolver.external_images.extend(
                self.owned_external_images.iter().map(|(key, value)| (*key, value.clone()))
            );

            for &mut (_, RenderedDocument { ref mut frame, .. }) in &mut active_documents {
                frame.profile_counters.reset_targets();
                self.prepare_gpu_cache(frame);
                assert!(frame.gpu_cache_frame_id <= self.gpu_cache_frame_id,
                    "Received frame depends on a later GPU cache epoch ({:?}) than one we received last via `UpdateGpuCache` ({:?})",
                    frame.gpu_cache_frame_id, self.gpu_cache_frame_id);

                self.draw_tile_frame(
                    frame,
                    framebuffer_size,
                    clear_depth_value.is_some(),
                    cpu_frame_id,
                    &mut stats
                );

                if self.debug_flags.contains(DebugFlags::PROFILER_DBG) {
                    frame_profiles.push(frame.profile_counters.clone());
                }
            }

            self.unlock_external_images();
            // Put the documents back (taken above for the borrowck dance).
            self.active_documents = active_documents;
        });

        let current_time = precise_time_ns();
        let ns = current_time - self.last_time;
        self.profile_counters.frame_time.set(ns);

        if self.max_recorded_profiles > 0 {
            // Keep a bounded history of CPU profiles, mirroring the GPU side.
            while self.cpu_profiles.len() >= self.max_recorded_profiles {
                self.cpu_profiles.pop_front();
            }
            let cpu_profile = CpuProfile::new(
                cpu_frame_id,
                self.backend_profile_counters.total_time.get(),
                profile_timers.cpu_time.get(),
                self.profile_counters.draw_calls.get(),
            );
            self.cpu_profiles.push_back(cpu_profile);
        }

        if self.debug_flags.contains(DebugFlags::PROFILER_DBG) {
            if let Some(framebuffer_size) = framebuffer_size {
                //TODO: take device/pixel ratio into equation?
                let screen_fraction = 1.0 / framebuffer_size.to_f32().area();
                self.profiler.draw_profile(
                    &frame_profiles,
                    &self.backend_profile_counters,
                    &self.profile_counters,
                    &mut profile_timers,
                    &profile_samplers,
                    screen_fraction,
                    &mut self.debug,
                    self.debug_flags.contains(DebugFlags::COMPACT_PROFILER),
                );
            }
        }

        // Per-frame counters have been reported; reset for the next frame.
        self.backend_profile_counters.reset();
        self.profile_counters.reset();
        self.profile_counters.frame_counter.inc();

        profile_timers.cpu_time.profile(|| {
            let _gm = self.gpu_profile.start_marker("end frame");
            self.gpu_profile.end_frame();
            self.debug.render(&mut self.device, framebuffer_size);
            self.device.end_frame();
        });
        self.last_time = current_time;

        if self.renderer_errors.is_empty() {
            Ok(stats)
        } else {
            Err(mem::replace(&mut self.renderer_errors, Vec::new()))
        }
    }
2922
layers_are_bouncing_back(&self) -> bool2923 pub fn layers_are_bouncing_back(&self) -> bool {
2924 self.active_documents
2925 .iter()
2926 .any(|&(_, ref render_doc)| !render_doc.layers_bouncing_back.is_empty())
2927 }
2928
    /// Applies all pending GPU cache update lists to the GPU cache texture:
    /// sizes the texture for the tallest requested height, applies each list
    /// in order, flushes dirty rows to the device, and records stats.
    fn update_gpu_cache(&mut self) {
        let _gm = self.gpu_profile.start_marker("gpu cache update");

        // For an artificial stress test of GPU cache resizing,
        // always pass an extra update list with at least one block in it.
        let gpu_cache_height = self.gpu_cache_texture.get_height();
        if gpu_cache_height != 0 && GPU_CACHE_RESIZE_TEST {
            self.pending_gpu_cache_updates.push(GpuCacheUpdateList {
                frame_id: FrameId::new(0),
                height: gpu_cache_height,
                blocks: vec![[1f32; 4].into()],
                updates: Vec::new(),
            });
        }

        // Tally the total number of updated blocks and the tallest texture
        // height requested across all pending update lists.
        let (updated_blocks, max_requested_height) = self
            .pending_gpu_cache_updates
            .iter()
            .fold((0, gpu_cache_height), |(count, height), list| {
                (count + list.blocks.len(), cmp::max(height, list.height))
            });

        // Report cache overflow at most once; the flag latches.
        if max_requested_height > self.max_texture_size && !self.gpu_cache_overflow {
            self.gpu_cache_overflow = true;
            self.renderer_errors.push(RendererError::MaxTextureSize);
        }

        //Note: if we decide to switch to scatter-style GPU cache update
        // permanently, we can have this code nicer with `BufferUploader` kind
        // of helper, similarly to how `TextureUploader` API is used.
        self.gpu_cache_texture.prepare_for_updates(
            &mut self.device,
            updated_blocks,
            max_requested_height,
        );

        for update_list in self.pending_gpu_cache_updates.drain(..) {
            assert!(update_list.height <= max_requested_height);
            // Track the newest GPU cache epoch applied, so frames depending
            // on a later epoch can be detected (see assert in render_impl).
            if update_list.frame_id > self.gpu_cache_frame_id {
                self.gpu_cache_frame_id = update_list.frame_id
            }
            self.gpu_cache_texture
                .update(&mut self.device, &update_list);
        }

        let updated_rows = self.gpu_cache_texture.flush(&mut self.device);

        // Record update statistics for the profiler.
        let counters = &mut self.backend_profile_counters.resources.gpu_cache;
        counters.updated_rows.set(updated_rows);
        counters.updated_blocks.set(updated_blocks);
    }
2980
prepare_gpu_cache(&mut self, frame: &Frame)2981 fn prepare_gpu_cache(&mut self, frame: &Frame) {
2982 let deferred_update_list = self.update_deferred_resolves(&frame.deferred_resolves);
2983 self.pending_gpu_cache_updates.extend(deferred_update_list);
2984
2985 self.update_gpu_cache();
2986
2987 // Note: the texture might have changed during the `update`,
2988 // so we need to bind it here.
2989 self.device.bind_texture(
2990 TextureSampler::ResourceCache,
2991 &self.gpu_cache_texture.texture,
2992 );
2993 }
2994
    /// Applies all pending texture cache update lists to the device:
    /// creates/initializes cache textures, uploads pixel data (from raw
    /// bytes or locked external images), and frees texture storage.
    fn update_texture_cache(&mut self) {
        let _gm = self.gpu_profile.start_marker("texture cache update");
        let mut pending_texture_updates = mem::replace(&mut self.pending_texture_updates, vec![]);

        for update_list in pending_texture_updates.drain(..) {
            for update in update_list.updates {
                match update.op {
                    TextureUpdateOp::Create {
                        width,
                        height,
                        layer_count,
                        format,
                        filter,
                        render_target,
                    } => {
                        let CacheTextureId(cache_texture_index) = update.id;
                        // Texture ids are allocated densely: an id one past
                        // the end of the map means "allocate a new texture".
                        if self.texture_resolver.cache_texture_map.len() == cache_texture_index {
                            // Create a new native texture, as requested by the texture cache.
                            let texture = self.device.create_texture(TextureTarget::Array, format);
                            self.texture_resolver.cache_texture_map.push(texture);
                        }
                        let texture =
                            &mut self.texture_resolver.cache_texture_map[cache_texture_index];
                        assert_eq!(texture.get_format(), format);

                        // Ensure no PBO is bound when creating the texture storage,
                        // or GL will attempt to read data from there.
                        self.device.init_texture(
                            texture,
                            width,
                            height,
                            filter,
                            render_target,
                            layer_count,
                            None,
                        );
                    }
                    TextureUpdateOp::Update {
                        rect,
                        source,
                        stride,
                        layer_index,
                        offset,
                    } => {
                        let texture = &self.texture_resolver.cache_texture_map[update.id.0];
                        let mut uploader = self.device.upload_texture(
                            texture,
                            &self.texture_cache_upload_pbo,
                            0,
                        );

                        match source {
                            TextureUpdateSource::Bytes { data } => {
                                uploader.upload(
                                    rect, layer_index, stride,
                                    &data[offset as usize ..],
                                );
                            }
                            TextureUpdateSource::External { id, channel_index } => {
                                // External images must be locked for the duration
                                // of the upload, and unlocked afterwards.
                                let handler = self.external_image_handler
                                    .as_mut()
                                    .expect("Found external image, but no handler set!");
                                match handler.lock(id, channel_index).source {
                                    ExternalImageSource::RawData(data) => {
                                        uploader.upload(
                                            rect, layer_index, stride,
                                            &data[offset as usize ..],
                                        );
                                    }
                                    ExternalImageSource::Invalid => {
                                        // Create a local buffer to fill the pbo.
                                        let bpp = texture.get_format().bytes_per_pixel();
                                        let width = stride.unwrap_or(rect.size.width * bpp);
                                        let total_size = width * rect.size.height;
                                        // WR haven't support RGBAF32 format in texture_cache, so
                                        // we use u8 type here.
                                        let dummy_data: Vec<u8> = vec![255; total_size as usize];
                                        uploader.upload(rect, layer_index, stride, &dummy_data);
                                    }
                                    _ => panic!("No external buffer found"),
                                };
                                handler.unlock(id, channel_index);
                            }
                        }
                    }
                    TextureUpdateOp::Free => {
                        // Release storage but keep the texture object slot,
                        // so indices into cache_texture_map stay valid.
                        let texture = &mut self.texture_resolver.cache_texture_map[update.id.0];
                        self.device.free_texture_storage(texture);
                    }
                }
            }
        }
    }
3088
draw_instanced_batch<T>( &mut self, data: &[T], vertex_array_kind: VertexArrayKind, textures: &BatchTextures, stats: &mut RendererStats, )3089 fn draw_instanced_batch<T>(
3090 &mut self,
3091 data: &[T],
3092 vertex_array_kind: VertexArrayKind,
3093 textures: &BatchTextures,
3094 stats: &mut RendererStats,
3095 ) {
3096 for i in 0 .. textures.colors.len() {
3097 self.texture_resolver.bind(
3098 &textures.colors[i],
3099 TextureSampler::color(i),
3100 &mut self.device,
3101 );
3102 }
3103
3104 // TODO: this probably isn't the best place for this.
3105 if let Some(ref texture) = self.dither_matrix_texture {
3106 self.device.bind_texture(TextureSampler::Dither, texture);
3107 }
3108
3109 let vao = match vertex_array_kind {
3110 VertexArrayKind::Primitive => &self.prim_vao,
3111 VertexArrayKind::Clip => &self.clip_vao,
3112 VertexArrayKind::Blur => &self.blur_vao,
3113 };
3114
3115 self.device.bind_vao(vao);
3116
3117 let batched = !self.debug_flags.contains(DebugFlags::DISABLE_BATCHING);
3118
3119 if batched {
3120 self.device
3121 .update_vao_instances(vao, data, VertexUsageHint::Stream);
3122 self.device
3123 .draw_indexed_triangles_instanced_u16(6, data.len() as i32);
3124 self.profile_counters.draw_calls.inc();
3125 stats.total_draw_calls += 1;
3126 } else {
3127 for i in 0 .. data.len() {
3128 self.device
3129 .update_vao_instances(vao, &data[i .. i + 1], VertexUsageHint::Stream);
3130 self.device.draw_triangles_u16(0, 6);
3131 self.profile_counters.draw_calls.inc();
3132 stats.total_draw_calls += 1;
3133 }
3134 }
3135
3136 self.profile_counters.vertices.add(6 * data.len());
3137 }
3138
submit_batch( &mut self, key: &BatchKey, instances: &[PrimitiveInstance], projection: &Transform3D<f32>, render_tasks: &RenderTaskTree, render_target: Option<(&Texture, i32)>, framebuffer_size: DeviceUintSize, stats: &mut RendererStats, scissor_rect: Option<DeviceIntRect>, )3139 fn submit_batch(
3140 &mut self,
3141 key: &BatchKey,
3142 instances: &[PrimitiveInstance],
3143 projection: &Transform3D<f32>,
3144 render_tasks: &RenderTaskTree,
3145 render_target: Option<(&Texture, i32)>,
3146 framebuffer_size: DeviceUintSize,
3147 stats: &mut RendererStats,
3148 scissor_rect: Option<DeviceIntRect>,
3149 ) {
3150 match key.kind {
3151 BatchKind::HardwareComposite => {
3152 self.ps_hw_composite
3153 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3154 }
3155 BatchKind::SplitComposite => {
3156 self.ps_split_composite.bind(
3157 &mut self.device,
3158 projection,
3159 0,
3160 &mut self.renderer_errors,
3161 );
3162 }
3163 BatchKind::Brush(brush_kind) => {
3164 match brush_kind {
3165 BrushBatchKind::Solid => {
3166 self.brush_solid.bind(
3167 &mut self.device,
3168 key.blend_mode,
3169 projection,
3170 0,
3171 &mut self.renderer_errors,
3172 );
3173 }
3174 BrushBatchKind::Image(image_buffer_kind) => {
3175 self.brush_image[image_buffer_kind as usize]
3176 .as_mut()
3177 .expect("Unsupported image shader kind")
3178 .bind(
3179 &mut self.device,
3180 key.blend_mode,
3181 projection,
3182 0,
3183 &mut self.renderer_errors,
3184 );
3185 }
3186 BrushBatchKind::Line => {
3187 self.brush_line.bind(
3188 &mut self.device,
3189 key.blend_mode,
3190 projection,
3191 0,
3192 &mut self.renderer_errors,
3193 );
3194 }
3195 BrushBatchKind::Blend => {
3196 self.brush_blend.bind(
3197 &mut self.device,
3198 key.blend_mode,
3199 projection,
3200 0,
3201 &mut self.renderer_errors,
3202 );
3203 }
3204 BrushBatchKind::MixBlend { .. } => {
3205 self.brush_mix_blend.bind(
3206 &mut self.device,
3207 key.blend_mode,
3208 projection,
3209 0,
3210 &mut self.renderer_errors,
3211 );
3212 }
3213 BrushBatchKind::RadialGradient => {
3214 self.brush_radial_gradient.bind(
3215 &mut self.device,
3216 key.blend_mode,
3217 projection,
3218 0,
3219 &mut self.renderer_errors,
3220 );
3221 }
3222 BrushBatchKind::LinearGradient => {
3223 self.brush_linear_gradient.bind(
3224 &mut self.device,
3225 key.blend_mode,
3226 projection,
3227 0,
3228 &mut self.renderer_errors,
3229 );
3230 }
3231 BrushBatchKind::YuvImage(image_buffer_kind, format, color_space) => {
3232 let shader_index =
3233 Renderer::get_yuv_shader_index(image_buffer_kind, format, color_space);
3234 self.brush_yuv_image[shader_index]
3235 .as_mut()
3236 .expect("Unsupported YUV shader kind")
3237 .bind(
3238 &mut self.device,
3239 key.blend_mode,
3240 projection,
3241 0,
3242 &mut self.renderer_errors,
3243 );
3244 }
3245 }
3246 }
3247 BatchKind::Transformable(transform_kind, batch_kind) => match batch_kind {
3248 TransformBatchKind::TextRun(..) => {
3249 unreachable!("bug: text batches are special cased");
3250 }
3251 TransformBatchKind::Image(image_buffer_kind) => {
3252 self.ps_image[image_buffer_kind as usize]
3253 .as_mut()
3254 .expect("Unsupported image shader kind")
3255 .bind(
3256 &mut self.device,
3257 transform_kind,
3258 projection,
3259 0,
3260 &mut self.renderer_errors,
3261 );
3262 }
3263 TransformBatchKind::BorderCorner => {
3264 self.ps_border_corner.bind(
3265 &mut self.device,
3266 transform_kind,
3267 projection,
3268 0,
3269 &mut self.renderer_errors,
3270 );
3271 }
3272 TransformBatchKind::BorderEdge => {
3273 self.ps_border_edge.bind(
3274 &mut self.device,
3275 transform_kind,
3276 projection,
3277 0,
3278 &mut self.renderer_errors,
3279 );
3280 }
3281 },
3282 };
3283
3284 // Handle special case readback for composites.
3285 if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, source_id, backdrop_id }) = key.kind {
3286 if scissor_rect.is_some() {
3287 self.device.disable_scissor();
3288 }
3289
3290 // composites can't be grouped together because
3291 // they may overlap and affect each other.
3292 debug_assert_eq!(instances.len(), 1);
3293 let cache_texture = self.texture_resolver
3294 .resolve(&SourceTexture::CacheRGBA8)
3295 .unwrap();
3296
3297 // Before submitting the composite batch, do the
3298 // framebuffer readbacks that are needed for each
3299 // composite operation in this batch.
3300 let source = &render_tasks[source_id];
3301 let backdrop = &render_tasks[task_id];
3302 let readback = &render_tasks[backdrop_id];
3303
3304 let (readback_rect, readback_layer) = readback.get_target_rect();
3305 let (backdrop_rect, _) = backdrop.get_target_rect();
3306 let backdrop_screen_origin = match backdrop.kind {
3307 RenderTaskKind::Picture(ref task_info) => match task_info.content_origin {
3308 ContentOrigin::Local(_) => panic!("bug: composite from a local-space rasterized picture?"),
3309 ContentOrigin::Screen(p) => p,
3310 },
3311 _ => panic!("bug: composite on non-picture?"),
3312 };
3313 let source_screen_origin = match source.kind {
3314 RenderTaskKind::Picture(ref task_info) => match task_info.content_origin {
3315 ContentOrigin::Local(_) => panic!("bug: composite from a local-space rasterized picture?"),
3316 ContentOrigin::Screen(p) => p,
3317 },
3318 _ => panic!("bug: composite on non-picture?"),
3319 };
3320
3321 // Bind the FBO to blit the backdrop to.
3322 // Called per-instance in case the layer (and therefore FBO)
3323 // changes. The device will skip the GL call if the requested
3324 // target is already bound.
3325 let cache_draw_target = (cache_texture, readback_layer.0 as i32);
3326 self.device.bind_draw_target(Some(cache_draw_target), None);
3327
3328 let mut src = DeviceIntRect::new(
3329 source_screen_origin + (backdrop_rect.origin - backdrop_screen_origin),
3330 readback_rect.size,
3331 );
3332 let mut dest = readback_rect.to_i32();
3333
3334 // Need to invert the y coordinates and flip the image vertically when
3335 // reading back from the framebuffer.
3336 if render_target.is_none() {
3337 src.origin.y = framebuffer_size.height as i32 - src.size.height - src.origin.y;
3338 dest.origin.y += dest.size.height;
3339 dest.size.height = -dest.size.height;
3340 }
3341
3342 self.device.bind_read_target(render_target);
3343 self.device.blit_render_target(src, dest);
3344
3345 // Restore draw target to current pass render target + layer.
3346 // Note: leaving the viewport unchanged, it's not a part of FBO state
3347 self.device.bind_draw_target(render_target, None);
3348
3349 if scissor_rect.is_some() {
3350 self.device.enable_scissor();
3351 }
3352 }
3353
3354 let _timer = self.gpu_profile.start_timer(key.kind.gpu_sampler_tag());
3355 self.draw_instanced_batch(
3356 instances,
3357 VertexArrayKind::Primitive,
3358 &key.textures,
3359 stats
3360 );
3361 }
3362
handle_blits( &mut self, blits: &[BlitJob], render_tasks: &RenderTaskTree, )3363 fn handle_blits(
3364 &mut self,
3365 blits: &[BlitJob],
3366 render_tasks: &RenderTaskTree,
3367 ) {
3368 if blits.is_empty() {
3369 return;
3370 }
3371
3372 let _timer = self.gpu_profile.start_timer(GPU_TAG_BLIT);
3373
3374 // TODO(gw): For now, we don't bother batching these by source texture.
3375 // If if ever shows up as an issue, we can easily batch them.
3376 for blit in blits {
3377 let source_rect = match blit.source {
3378 BlitJobSource::Texture(texture_id, layer, source_rect) => {
3379 // A blit from a texture into this target.
3380 let src_texture = self.texture_resolver
3381 .resolve(&texture_id)
3382 .expect("BUG: invalid source texture");
3383 self.device.bind_read_target(Some((src_texture, layer)));
3384 source_rect
3385 }
3386 BlitJobSource::RenderTask(task_id) => {
3387 // A blit from the child render task into this target.
3388 // TODO(gw): Support R8 format here once we start
3389 // creating mips for alpha masks.
3390 let src_texture = self.texture_resolver
3391 .resolve(&SourceTexture::CacheRGBA8)
3392 .expect("BUG: invalid source texture");
3393 let source = &render_tasks[task_id];
3394 let (source_rect, layer) = source.get_target_rect();
3395 self.device.bind_read_target(Some((src_texture, layer.0 as i32)));
3396 source_rect
3397 }
3398 };
3399 debug_assert_eq!(source_rect.size, blit.target_rect.size);
3400 self.device.blit_render_target(
3401 source_rect,
3402 blit.target_rect,
3403 );
3404 }
3405 }
3406
handle_scaling( &mut self, render_tasks: &RenderTaskTree, scalings: &Vec<ScalingInfo>, source: SourceTexture, )3407 fn handle_scaling(
3408 &mut self,
3409 render_tasks: &RenderTaskTree,
3410 scalings: &Vec<ScalingInfo>,
3411 source: SourceTexture,
3412 ) {
3413 let cache_texture = self.texture_resolver
3414 .resolve(&source)
3415 .unwrap();
3416 for scaling in scalings {
3417 let source = &render_tasks[scaling.src_task_id];
3418 let dest = &render_tasks[scaling.dest_task_id];
3419
3420 let (source_rect, source_layer) = source.get_target_rect();
3421 let (dest_rect, _) = dest.get_target_rect();
3422
3423 let cache_draw_target = (cache_texture, source_layer.0 as i32);
3424 self.device
3425 .bind_read_target(Some(cache_draw_target));
3426
3427 self.device.blit_render_target(source_rect, dest_rect);
3428 }
3429 }
3430
draw_color_target( &mut self, render_target: Option<(&Texture, i32)>, target: &ColorRenderTarget, framebuffer_target_rect: DeviceUintRect, target_size: DeviceUintSize, depth_is_ready: bool, clear_color: Option<[f32; 4]>, render_tasks: &RenderTaskTree, projection: &Transform3D<f32>, frame_id: FrameId, stats: &mut RendererStats, )3431 fn draw_color_target(
3432 &mut self,
3433 render_target: Option<(&Texture, i32)>,
3434 target: &ColorRenderTarget,
3435 framebuffer_target_rect: DeviceUintRect,
3436 target_size: DeviceUintSize,
3437 depth_is_ready: bool,
3438 clear_color: Option<[f32; 4]>,
3439 render_tasks: &RenderTaskTree,
3440 projection: &Transform3D<f32>,
3441 frame_id: FrameId,
3442 stats: &mut RendererStats,
3443 ) {
3444 self.profile_counters.color_targets.inc();
3445 let _gm = self.gpu_profile.start_marker("color target");
3446
3447 // sanity check for the depth buffer
3448 if let Some((texture, _)) = render_target {
3449 assert!(texture.has_depth() >= target.needs_depth());
3450 }
3451
3452 {
3453 let _timer = self.gpu_profile.start_timer(GPU_TAG_SETUP_TARGET);
3454 self.device
3455 .bind_draw_target(render_target, Some(target_size));
3456 self.device.disable_depth();
3457 self.device.set_blend(false);
3458
3459 let depth_clear = if !depth_is_ready && target.needs_depth() {
3460 self.device.enable_depth_write();
3461 Some(1.0)
3462 } else {
3463 None
3464 };
3465
3466 let clear_rect = if render_target.is_some() {
3467 if self.enable_clear_scissor {
3468 // TODO(gw): Applying a scissor rect and minimal clear here
3469 // is a very large performance win on the Intel and nVidia
3470 // GPUs that I have tested with. It's possible it may be a
3471 // performance penalty on other GPU types - we should test this
3472 // and consider different code paths.
3473 Some(target.used_rect())
3474 } else {
3475 None
3476 }
3477 } else if framebuffer_target_rect == DeviceUintRect::new(DeviceUintPoint::zero(), target_size) {
3478 // whole screen is covered, no need for scissor
3479 None
3480 } else {
3481 let mut rect = framebuffer_target_rect.to_i32();
3482 // Note: `framebuffer_target_rect` needs a Y-flip before going to GL
3483 // Note: at this point, the target rectangle is not guaranteed to be within the main framebuffer bounds
3484 // but `clear_target_rect` is totally fine with negative origin, as long as width & height are positive
3485 rect.origin.y = target_size.height as i32 - rect.origin.y - rect.size.height;
3486 Some(rect)
3487 };
3488
3489 self.device.clear_target(clear_color, depth_clear, clear_rect);
3490
3491 if depth_clear.is_some() {
3492 self.device.disable_depth_write();
3493 }
3494 }
3495
3496 // Handle any blits from the texture cache to this target.
3497 self.handle_blits(&target.blits, render_tasks);
3498
3499 // Draw any blurs for this target.
3500 // Blurs are rendered as a standard 2-pass
3501 // separable implementation.
3502 // TODO(gw): In the future, consider having
3503 // fast path blur shaders for common
3504 // blur radii with fixed weights.
3505 if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
3506 let _timer = self.gpu_profile.start_timer(GPU_TAG_BLUR);
3507
3508 self.device.set_blend(false);
3509 self.cs_blur_rgba8
3510 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3511
3512 if !target.vertical_blurs.is_empty() {
3513 self.draw_instanced_batch(
3514 &target.vertical_blurs,
3515 VertexArrayKind::Blur,
3516 &BatchTextures::no_texture(),
3517 stats,
3518 );
3519 }
3520
3521 if !target.horizontal_blurs.is_empty() {
3522 self.draw_instanced_batch(
3523 &target.horizontal_blurs,
3524 VertexArrayKind::Blur,
3525 &BatchTextures::no_texture(),
3526 stats,
3527 );
3528 }
3529 }
3530
3531 self.handle_scaling(render_tasks, &target.scalings, SourceTexture::CacheRGBA8);
3532
3533 // Draw any textrun caches for this target. For now, this
3534 // is only used to cache text runs that are to be blurred
3535 // for shadow support. In the future it may be worth
3536 // considering using this for (some) other text runs, since
3537 // it removes the overhead of submitting many small glyphs
3538 // to multiple tiles in the normal text run case.
3539 for alpha_batch_container in &target.alpha_batch_containers {
3540 if !alpha_batch_container.text_run_cache_prims.is_empty() {
3541 self.device.set_blend(true);
3542 self.device.set_blend_mode_premultiplied_alpha();
3543
3544 let _timer = self.gpu_profile.start_timer(GPU_TAG_CACHE_TEXT_RUN);
3545 self.cs_text_run
3546 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3547 for (texture_id, instances) in &alpha_batch_container.text_run_cache_prims {
3548 self.draw_instanced_batch(
3549 instances,
3550 VertexArrayKind::Primitive,
3551 &BatchTextures::color(*texture_id),
3552 stats,
3553 );
3554 }
3555 }
3556 }
3557
3558 //TODO: record the pixel count for cached primitives
3559
3560 if target.needs_depth() {
3561 let _gl = self.gpu_profile.start_marker("opaque batches");
3562 let opaque_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
3563 self.device.set_blend(false);
3564 //Note: depth equality is needed for split planes
3565 self.device.set_depth_func(DepthFunction::LessEqual);
3566 self.device.enable_depth();
3567 self.device.enable_depth_write();
3568
3569 for alpha_batch_container in &target.alpha_batch_containers {
3570 if let Some(target_rect) = alpha_batch_container.target_rect {
3571 self.device.enable_scissor();
3572 self.device.set_scissor_rect(target_rect);
3573 }
3574
3575 // Draw opaque batches front-to-back for maximum
3576 // z-buffer efficiency!
3577 for batch in alpha_batch_container
3578 .opaque_batches
3579 .iter()
3580 .rev()
3581 {
3582 self.submit_batch(
3583 &batch.key,
3584 &batch.instances,
3585 &projection,
3586 render_tasks,
3587 render_target,
3588 target_size,
3589 stats,
3590 alpha_batch_container.target_rect,
3591 );
3592 }
3593
3594 if alpha_batch_container.target_rect.is_some() {
3595 self.device.disable_scissor();
3596 }
3597 }
3598
3599 self.device.disable_depth_write();
3600 self.gpu_profile.finish_sampler(opaque_sampler);
3601 }
3602
3603 let _gl = self.gpu_profile.start_marker("alpha batches");
3604 let transparent_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
3605 self.device.set_blend(false);
3606 let mut prev_blend_mode = BlendMode::None;
3607
3608 for alpha_batch_container in &target.alpha_batch_containers {
3609 if let Some(target_rect) = alpha_batch_container.target_rect {
3610 self.device.enable_scissor();
3611 self.device.set_scissor_rect(target_rect);
3612 }
3613
3614 for batch in &alpha_batch_container.alpha_batches {
3615 match batch.key.kind {
3616 BatchKind::Transformable(transform_kind, TransformBatchKind::TextRun(glyph_format)) => {
3617 // Text run batches are handled by this special case branch.
3618 // In the case of subpixel text, we draw it as a two pass
3619 // effect, to ensure we can apply clip masks correctly.
3620 // In the future, there are several optimizations available:
3621 // 1) Use dual source blending where available (almost all recent hardware).
3622 // 2) Use frame buffer fetch where available (most modern hardware).
3623 // 3) Consider the old constant color blend method where no clip is applied.
3624 let _timer = self.gpu_profile.start_timer(GPU_TAG_PRIM_TEXT_RUN);
3625
3626 self.device.set_blend(true);
3627
3628 match batch.key.blend_mode {
3629 BlendMode::Alpha => panic!("Attempt to composite non-premultiplied text primitives."),
3630 BlendMode::PremultipliedAlpha => {
3631 self.device.set_blend_mode_premultiplied_alpha();
3632
3633 self.ps_text_run.bind(
3634 &mut self.device,
3635 glyph_format,
3636 transform_kind,
3637 projection,
3638 TextShaderMode::from(glyph_format),
3639 &mut self.renderer_errors,
3640 );
3641
3642 self.draw_instanced_batch(
3643 &batch.instances,
3644 VertexArrayKind::Primitive,
3645 &batch.key.textures,
3646 stats,
3647 );
3648 }
3649 BlendMode::SubpixelDualSource => {
3650 self.device.set_blend_mode_subpixel_dual_source();
3651
3652 self.ps_text_run_dual_source.bind(
3653 &mut self.device,
3654 glyph_format,
3655 transform_kind,
3656 projection,
3657 TextShaderMode::SubpixelDualSource,
3658 &mut self.renderer_errors,
3659 );
3660
3661 self.draw_instanced_batch(
3662 &batch.instances,
3663 VertexArrayKind::Primitive,
3664 &batch.key.textures,
3665 stats,
3666 );
3667 }
3668 BlendMode::SubpixelConstantTextColor(color) => {
3669 self.device.set_blend_mode_subpixel_constant_text_color(color);
3670
3671 self.ps_text_run.bind(
3672 &mut self.device,
3673 glyph_format,
3674 transform_kind,
3675 projection,
3676 TextShaderMode::SubpixelConstantTextColor,
3677 &mut self.renderer_errors,
3678 );
3679
3680 self.draw_instanced_batch(
3681 &batch.instances,
3682 VertexArrayKind::Primitive,
3683 &batch.key.textures,
3684 stats,
3685 );
3686 }
3687 BlendMode::SubpixelVariableTextColor => {
3688 // Using the two pass component alpha rendering technique:
3689 //
3690 // http://anholt.livejournal.com/32058.html
3691 //
3692 self.device.set_blend_mode_subpixel_pass0();
3693
3694 self.ps_text_run.bind(
3695 &mut self.device,
3696 glyph_format,
3697 transform_kind,
3698 projection,
3699 TextShaderMode::SubpixelPass0,
3700 &mut self.renderer_errors,
3701 );
3702
3703 self.draw_instanced_batch(
3704 &batch.instances,
3705 VertexArrayKind::Primitive,
3706 &batch.key.textures,
3707 stats,
3708 );
3709
3710 self.device.set_blend_mode_subpixel_pass1();
3711
3712 self.ps_text_run.bind(
3713 &mut self.device,
3714 glyph_format,
3715 transform_kind,
3716 projection,
3717 TextShaderMode::SubpixelPass1,
3718 &mut self.renderer_errors,
3719 );
3720
3721 // When drawing the 2nd pass, we know that the VAO, textures etc
3722 // are all set up from the previous draw_instanced_batch call,
3723 // so just issue a draw call here to avoid re-uploading the
3724 // instances and re-binding textures etc.
3725 self.device
3726 .draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
3727 }
3728 BlendMode::SubpixelWithBgColor => {
3729 // Using the three pass "component alpha with font smoothing
3730 // background color" rendering technique:
3731 //
3732 // /webrender/doc/text-rendering.md
3733 //
3734 self.device.set_blend_mode_subpixel_with_bg_color_pass0();
3735
3736 self.ps_text_run.bind(
3737 &mut self.device,
3738 glyph_format,
3739 transform_kind,
3740 projection,
3741 TextShaderMode::SubpixelWithBgColorPass0,
3742 &mut self.renderer_errors,
3743 );
3744
3745 self.draw_instanced_batch(
3746 &batch.instances,
3747 VertexArrayKind::Primitive,
3748 &batch.key.textures,
3749 stats,
3750 );
3751
3752 self.device.set_blend_mode_subpixel_with_bg_color_pass1();
3753
3754 self.ps_text_run.bind(
3755 &mut self.device,
3756 glyph_format,
3757 transform_kind,
3758 projection,
3759 TextShaderMode::SubpixelWithBgColorPass1,
3760 &mut self.renderer_errors,
3761 );
3762
3763 // When drawing the 2nd and 3rd passes, we know that the VAO, textures etc
3764 // are all set up from the previous draw_instanced_batch call,
3765 // so just issue a draw call here to avoid re-uploading the
3766 // instances and re-binding textures etc.
3767 self.device
3768 .draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
3769
3770 self.device.set_blend_mode_subpixel_with_bg_color_pass2();
3771
3772 self.ps_text_run.bind(
3773 &mut self.device,
3774 glyph_format,
3775 transform_kind,
3776 projection,
3777 TextShaderMode::SubpixelWithBgColorPass2,
3778 &mut self.renderer_errors,
3779 );
3780
3781 self.device
3782 .draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
3783 }
3784 BlendMode::PremultipliedDestOut | BlendMode::None => {
3785 unreachable!("bug: bad blend mode for text");
3786 }
3787 }
3788
3789 prev_blend_mode = BlendMode::None;
3790 self.device.set_blend(false);
3791 }
3792 _ => {
3793 if batch.key.blend_mode != prev_blend_mode {
3794 match batch.key.blend_mode {
3795 BlendMode::None => {
3796 self.device.set_blend(false);
3797 }
3798 BlendMode::Alpha => {
3799 self.device.set_blend(true);
3800 self.device.set_blend_mode_alpha();
3801 }
3802 BlendMode::PremultipliedAlpha => {
3803 self.device.set_blend(true);
3804 self.device.set_blend_mode_premultiplied_alpha();
3805 }
3806 BlendMode::PremultipliedDestOut => {
3807 self.device.set_blend(true);
3808 self.device.set_blend_mode_premultiplied_dest_out();
3809 }
3810 BlendMode::SubpixelConstantTextColor(..) |
3811 BlendMode::SubpixelVariableTextColor |
3812 BlendMode::SubpixelWithBgColor |
3813 BlendMode::SubpixelDualSource => {
3814 unreachable!("bug: subpx text handled earlier");
3815 }
3816 }
3817 prev_blend_mode = batch.key.blend_mode;
3818 }
3819
3820 self.submit_batch(
3821 &batch.key,
3822 &batch.instances,
3823 &projection,
3824 render_tasks,
3825 render_target,
3826 target_size,
3827 stats,
3828 alpha_batch_container.target_rect,
3829 );
3830 }
3831 }
3832 }
3833
3834 if alpha_batch_container.target_rect.is_some() {
3835 self.device.disable_scissor();
3836 }
3837 }
3838
3839 self.device.disable_depth();
3840 self.device.set_blend(false);
3841 self.gpu_profile.finish_sampler(transparent_sampler);
3842
3843 // For any registered image outputs on this render target,
3844 // get the texture from caller and blit it.
3845 for output in &target.outputs {
3846 let handler = self.output_image_handler
3847 .as_mut()
3848 .expect("Found output image, but no handler set!");
3849 if let Some((texture_id, output_size)) = handler.lock(output.pipeline_id) {
3850 let fbo_id = match self.output_targets.entry(texture_id) {
3851 Entry::Vacant(entry) => {
3852 let fbo_id = self.device.create_fbo_for_external_texture(texture_id);
3853 entry.insert(FrameOutput {
3854 fbo_id,
3855 last_access: frame_id,
3856 });
3857 fbo_id
3858 }
3859 Entry::Occupied(mut entry) => {
3860 let target = entry.get_mut();
3861 target.last_access = frame_id;
3862 target.fbo_id
3863 }
3864 };
3865 let (src_rect, _) = render_tasks[output.task_id].get_target_rect();
3866 let dest_rect = DeviceIntRect::new(DeviceIntPoint::zero(), output_size);
3867 self.device.bind_read_target(render_target);
3868 self.device.bind_external_draw_target(fbo_id);
3869 self.device.blit_render_target(src_rect, dest_rect);
3870 handler.unlock(output.pipeline_id);
3871 }
3872 }
3873 }
3874
draw_alpha_target( &mut self, render_target: (&Texture, i32), target: &AlphaRenderTarget, target_size: DeviceUintSize, projection: &Transform3D<f32>, render_tasks: &RenderTaskTree, stats: &mut RendererStats, )3875 fn draw_alpha_target(
3876 &mut self,
3877 render_target: (&Texture, i32),
3878 target: &AlphaRenderTarget,
3879 target_size: DeviceUintSize,
3880 projection: &Transform3D<f32>,
3881 render_tasks: &RenderTaskTree,
3882 stats: &mut RendererStats,
3883 ) {
3884 self.profile_counters.alpha_targets.inc();
3885 let _gm = self.gpu_profile.start_marker("alpha target");
3886 let alpha_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_ALPHA);
3887
3888 {
3889 let _timer = self.gpu_profile.start_timer(GPU_TAG_SETUP_TARGET);
3890 self.device
3891 .bind_draw_target(Some(render_target), Some(target_size));
3892 self.device.disable_depth();
3893 self.device.disable_depth_write();
3894
3895 // TODO(gw): Applying a scissor rect and minimal clear here
3896 // is a very large performance win on the Intel and nVidia
3897 // GPUs that I have tested with. It's possible it may be a
3898 // performance penalty on other GPU types - we should test this
3899 // and consider different code paths.
3900 let clear_color = [1.0, 1.0, 1.0, 0.0];
3901 self.device.clear_target(
3902 Some(clear_color),
3903 None,
3904 Some(target.used_rect()),
3905 );
3906
3907 let zero_color = [0.0, 0.0, 0.0, 0.0];
3908 for &task_id in &target.zero_clears {
3909 let (rect, _) = render_tasks[task_id].get_target_rect();
3910 self.device.clear_target(
3911 Some(zero_color),
3912 None,
3913 Some(rect),
3914 );
3915 }
3916 }
3917
3918 // Draw any blurs for this target.
3919 // Blurs are rendered as a standard 2-pass
3920 // separable implementation.
3921 // TODO(gw): In the future, consider having
3922 // fast path blur shaders for common
3923 // blur radii with fixed weights.
3924 if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
3925 let _timer = self.gpu_profile.start_timer(GPU_TAG_BLUR);
3926
3927 self.device.set_blend(false);
3928 self.cs_blur_a8
3929 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3930
3931 if !target.vertical_blurs.is_empty() {
3932 self.draw_instanced_batch(
3933 &target.vertical_blurs,
3934 VertexArrayKind::Blur,
3935 &BatchTextures::no_texture(),
3936 stats,
3937 );
3938 }
3939
3940 if !target.horizontal_blurs.is_empty() {
3941 self.draw_instanced_batch(
3942 &target.horizontal_blurs,
3943 VertexArrayKind::Blur,
3944 &BatchTextures::no_texture(),
3945 stats,
3946 );
3947 }
3948 }
3949
3950 self.handle_scaling(render_tasks, &target.scalings, SourceTexture::CacheA8);
3951
3952 // Draw the clip items into the tiled alpha mask.
3953 {
3954 let _timer = self.gpu_profile.start_timer(GPU_TAG_CACHE_CLIP);
3955
3956 // If we have border corner clips, the first step is to clear out the
3957 // area in the clip mask. This allows drawing multiple invididual clip
3958 // in regions below.
3959 if !target.clip_batcher.border_clears.is_empty() {
3960 let _gm2 = self.gpu_profile.start_marker("clip borders [clear]");
3961 self.device.set_blend(false);
3962 self.cs_clip_border
3963 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3964 self.draw_instanced_batch(
3965 &target.clip_batcher.border_clears,
3966 VertexArrayKind::Clip,
3967 &BatchTextures::no_texture(),
3968 stats,
3969 );
3970 }
3971
3972 // Draw any dots or dashes for border corners.
3973 if !target.clip_batcher.borders.is_empty() {
3974 let _gm2 = self.gpu_profile.start_marker("clip borders");
3975 // We are masking in parts of the corner (dots or dashes) here.
3976 // Blend mode is set to max to allow drawing multiple dots.
3977 // The individual dots and dashes in a border never overlap, so using
3978 // a max blend mode here is fine.
3979 self.device.set_blend(true);
3980 self.device.set_blend_mode_max();
3981 self.cs_clip_border
3982 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
3983 self.draw_instanced_batch(
3984 &target.clip_batcher.borders,
3985 VertexArrayKind::Clip,
3986 &BatchTextures::no_texture(),
3987 stats,
3988 );
3989 }
3990
3991 // switch to multiplicative blending
3992 self.device.set_blend(true);
3993 self.device.set_blend_mode_multiply();
3994
3995 // draw rounded cornered rectangles
3996 if !target.clip_batcher.rectangles.is_empty() {
3997 let _gm2 = self.gpu_profile.start_marker("clip rectangles");
3998 self.cs_clip_rectangle.bind(
3999 &mut self.device,
4000 projection,
4001 0,
4002 &mut self.renderer_errors,
4003 );
4004 self.draw_instanced_batch(
4005 &target.clip_batcher.rectangles,
4006 VertexArrayKind::Clip,
4007 &BatchTextures::no_texture(),
4008 stats,
4009 );
4010 }
4011 // draw box-shadow clips
4012 for (mask_texture_id, items) in target.clip_batcher.box_shadows.iter() {
4013 let _gm2 = self.gpu_profile.start_marker("box-shadows");
4014 let textures = BatchTextures {
4015 colors: [
4016 mask_texture_id.clone(),
4017 SourceTexture::Invalid,
4018 SourceTexture::Invalid,
4019 ],
4020 };
4021 self.cs_clip_box_shadow
4022 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
4023 self.draw_instanced_batch(
4024 items,
4025 VertexArrayKind::Clip,
4026 &textures,
4027 stats,
4028 );
4029 }
4030
4031 // draw image masks
4032 for (mask_texture_id, items) in target.clip_batcher.images.iter() {
4033 let _gm2 = self.gpu_profile.start_marker("clip images");
4034 let textures = BatchTextures {
4035 colors: [
4036 mask_texture_id.clone(),
4037 SourceTexture::Invalid,
4038 SourceTexture::Invalid,
4039 ],
4040 };
4041 self.cs_clip_image
4042 .bind(&mut self.device, projection, 0, &mut self.renderer_errors);
4043 self.draw_instanced_batch(
4044 items,
4045 VertexArrayKind::Clip,
4046 &textures,
4047 stats,
4048 );
4049 }
4050 }
4051
4052 self.gpu_profile.finish_sampler(alpha_sampler);
4053 }
4054
draw_texture_cache_target( &mut self, texture: &SourceTexture, layer: i32, target: &TextureCacheRenderTarget, render_tasks: &RenderTaskTree, stats: &mut RendererStats, )4055 fn draw_texture_cache_target(
4056 &mut self,
4057 texture: &SourceTexture,
4058 layer: i32,
4059 target: &TextureCacheRenderTarget,
4060 render_tasks: &RenderTaskTree,
4061 stats: &mut RendererStats,
4062 ) {
4063 let projection = {
4064 let texture = self.texture_resolver
4065 .resolve(texture)
4066 .expect("BUG: invalid target texture");
4067 let target_size = texture.get_dimensions();
4068
4069 self.device
4070 .bind_draw_target(Some((texture, layer)), Some(target_size));
4071 self.device.disable_depth();
4072 self.device.disable_depth_write();
4073 self.device.set_blend(false);
4074
4075 Transform3D::ortho(
4076 0.0,
4077 target_size.width as f32,
4078 0.0,
4079 target_size.height as f32,
4080 ORTHO_NEAR_PLANE,
4081 ORTHO_FAR_PLANE,
4082 )
4083 };
4084
4085 // Handle any blits to this texture from child tasks.
4086 self.handle_blits(&target.blits, render_tasks);
4087
4088 // Draw any blurs for this target.
4089 if !target.horizontal_blurs.is_empty() {
4090 let _timer = self.gpu_profile.start_timer(GPU_TAG_BLUR);
4091
4092 self.cs_blur_a8
4093 .bind(&mut self.device, &projection, 0, &mut self.renderer_errors);
4094
4095 self.draw_instanced_batch(
4096 &target.horizontal_blurs,
4097 VertexArrayKind::Blur,
4098 &BatchTextures::no_texture(),
4099 stats,
4100 );
4101 }
4102 }
4103
update_deferred_resolves(&mut self, deferred_resolves: &[DeferredResolve]) -> Option<GpuCacheUpdateList>4104 fn update_deferred_resolves(&mut self, deferred_resolves: &[DeferredResolve]) -> Option<GpuCacheUpdateList> {
4105 // The first thing we do is run through any pending deferred
4106 // resolves, and use a callback to get the UV rect for this
4107 // custom item. Then we patch the resource_rects structure
4108 // here before it's uploaded to the GPU.
4109 if deferred_resolves.is_empty() {
4110 return None;
4111 }
4112
4113 let handler = self.external_image_handler
4114 .as_mut()
4115 .expect("Found external image, but no handler set!");
4116
4117 let mut list = GpuCacheUpdateList {
4118 frame_id: FrameId::new(0),
4119 height: self.gpu_cache_texture.get_height(),
4120 blocks: Vec::new(),
4121 updates: Vec::new(),
4122 };
4123
4124 for deferred_resolve in deferred_resolves {
4125 self.gpu_profile.place_marker("deferred resolve");
4126 let props = &deferred_resolve.image_properties;
4127 let ext_image = props
4128 .external_image
4129 .expect("BUG: Deferred resolves must be external images!");
4130 let image = handler.lock(ext_image.id, ext_image.channel_index);
4131 let texture_target = match ext_image.image_type {
4132 ExternalImageType::TextureHandle(target) => target,
4133 ExternalImageType::Buffer => {
4134 panic!("not a suitable image type in update_deferred_resolves()");
4135 }
4136 };
4137
4138 // In order to produce the handle, the external image handler may call into
4139 // the GL context and change some states.
4140 self.device.reset_state();
4141
4142 let texture = match image.source {
4143 ExternalImageSource::NativeTexture(texture_id) => {
4144 ExternalTexture::new(texture_id, texture_target)
4145 }
4146 ExternalImageSource::Invalid => {
4147 warn!("Invalid ext-image");
4148 debug!(
4149 "For ext_id:{:?}, channel:{}.",
4150 ext_image.id,
4151 ext_image.channel_index
4152 );
4153 // Just use 0 as the gl handle for this failed case.
4154 ExternalTexture::new(0, texture_target)
4155 }
4156 ExternalImageSource::RawData(_) => {
4157 panic!("Raw external data is not expected for deferred resolves!");
4158 }
4159 };
4160
4161 self.texture_resolver
4162 .external_images
4163 .insert((ext_image.id, ext_image.channel_index), texture);
4164
4165 list.updates.push(GpuCacheUpdate::Copy {
4166 block_index: list.blocks.len(),
4167 block_count: BLOCKS_PER_UV_RECT,
4168 address: deferred_resolve.address,
4169 });
4170 list.blocks.push(image.uv.into());
4171 list.blocks.push([0f32; 4].into());
4172 }
4173
4174 Some(list)
4175 }
4176
unlock_external_images(&mut self)4177 fn unlock_external_images(&mut self) {
4178 if !self.texture_resolver.external_images.is_empty() {
4179 let handler = self.external_image_handler
4180 .as_mut()
4181 .expect("Found external image, but no handler set!");
4182
4183 for (ext_data, _) in self.texture_resolver.external_images.drain() {
4184 handler.unlock(ext_data.0, ext_data.1);
4185 }
4186 }
4187 }
4188
allocate_target_texture<T: RenderTarget>( &mut self, list: &mut RenderTargetList<T>, counters: &mut FrameProfileCounters, frame_id: FrameId, ) -> Option<ActiveTexture>4189 fn allocate_target_texture<T: RenderTarget>(
4190 &mut self,
4191 list: &mut RenderTargetList<T>,
4192 counters: &mut FrameProfileCounters,
4193 frame_id: FrameId,
4194 ) -> Option<ActiveTexture> {
4195 debug_assert_ne!(list.max_size, DeviceUintSize::zero());
4196 if list.targets.is_empty() {
4197 return None
4198 }
4199
4200 counters.targets_used.inc();
4201
4202 // First, try finding a perfect match
4203 let selector = TargetSelector {
4204 size: list.max_size,
4205 num_layers: list.targets.len() as _,
4206 format: list.format,
4207 };
4208 let mut index = self.texture_resolver.render_target_pool
4209 .iter()
4210 .position(|texture| {
4211 //TODO: re-use a part of a larger target, if available
4212 selector == TargetSelector {
4213 size: texture.get_dimensions(),
4214 num_layers: texture.get_render_target_layer_count(),
4215 format: texture.get_format(),
4216 }
4217 });
4218
4219 // Next, try at least finding a matching format
4220 if index.is_none() {
4221 counters.targets_changed.inc();
4222 index = self.texture_resolver.render_target_pool
4223 .iter()
4224 .position(|texture| texture.get_format() == list.format && !texture.used_in_frame(frame_id));
4225 }
4226
4227 let mut texture = match index {
4228 Some(pos) => {
4229 self.texture_resolver.render_target_pool.swap_remove(pos)
4230 }
4231 None => {
4232 counters.targets_created.inc();
4233 // finally, give up and create a new one
4234 self.device.create_texture(TextureTarget::Array, list.format)
4235 }
4236 };
4237
4238 self.device.init_texture(
4239 &mut texture,
4240 list.max_size.width,
4241 list.max_size.height,
4242 TextureFilter::Linear,
4243 Some(RenderTargetInfo {
4244 has_depth: list.needs_depth(),
4245 }),
4246 list.targets.len() as _,
4247 None,
4248 );
4249
4250 list.check_ready(&texture);
4251 Some(ActiveTexture {
4252 texture,
4253 saved_index: list.saved_index.clone(),
4254 is_shared: list.is_shared,
4255 })
4256 }
4257
bind_frame_data(&mut self, frame: &mut Frame)4258 fn bind_frame_data(&mut self, frame: &mut Frame) {
4259 let _timer = self.gpu_profile.start_timer(GPU_TAG_SETUP_DATA);
4260 self.device.set_device_pixel_ratio(frame.device_pixel_ratio);
4261
4262 self.node_data_texture.update(&mut self.device, &mut frame.node_data);
4263 self.device.bind_texture(TextureSampler::ClipScrollNodes, &self.node_data_texture.texture);
4264
4265 self.local_clip_rects_texture.update(
4266 &mut self.device,
4267 &mut frame.clip_chain_local_clip_rects
4268 );
4269 self.device.bind_texture(
4270 TextureSampler::LocalClipRects,
4271 &self.local_clip_rects_texture.texture
4272 );
4273
4274 self.render_task_texture
4275 .update(&mut self.device, &mut frame.render_tasks.task_data);
4276 self.device.bind_texture(
4277 TextureSampler::RenderTasks,
4278 &self.render_task_texture.texture,
4279 );
4280
4281 debug_assert!(self.texture_resolver.cache_a8_texture.is_none());
4282 debug_assert!(self.texture_resolver.cache_rgba8_texture.is_none());
4283 }
4284
draw_tile_frame( &mut self, frame: &mut Frame, framebuffer_size: Option<DeviceUintSize>, framebuffer_depth_is_ready: bool, frame_id: FrameId, stats: &mut RendererStats, )4285 fn draw_tile_frame(
4286 &mut self,
4287 frame: &mut Frame,
4288 framebuffer_size: Option<DeviceUintSize>,
4289 framebuffer_depth_is_ready: bool,
4290 frame_id: FrameId,
4291 stats: &mut RendererStats,
4292 ) {
4293 let _gm = self.gpu_profile.start_marker("tile frame draw");
4294
4295 if frame.passes.is_empty() {
4296 frame.has_been_rendered = true;
4297 return;
4298 }
4299
4300 self.device.disable_depth_write();
4301 self.device.disable_stencil();
4302 self.device.set_blend(false);
4303
4304 self.bind_frame_data(frame);
4305 self.texture_resolver.begin_frame();
4306
4307 for (pass_index, pass) in frame.passes.iter_mut().enumerate() {
4308 self.gpu_profile.place_marker(&format!("pass {}", pass_index));
4309
4310 self.texture_resolver.bind(
4311 &SourceTexture::CacheA8,
4312 TextureSampler::CacheA8,
4313 &mut self.device,
4314 );
4315 self.texture_resolver.bind(
4316 &SourceTexture::CacheRGBA8,
4317 TextureSampler::CacheRGBA8,
4318 &mut self.device,
4319 );
4320
4321 let (cur_alpha, cur_color) = match pass.kind {
4322 RenderPassKind::MainFramebuffer(ref target) => {
4323 if let Some(framebuffer_size) = framebuffer_size {
4324 stats.color_target_count += 1;
4325
4326 let clear_color = frame.background_color.map(|color| color.to_array());
4327 let projection = Transform3D::ortho(
4328 0.0,
4329 framebuffer_size.width as f32,
4330 framebuffer_size.height as f32,
4331 0.0,
4332 ORTHO_NEAR_PLANE,
4333 ORTHO_FAR_PLANE,
4334 );
4335
4336 self.draw_color_target(
4337 None,
4338 target,
4339 frame.inner_rect,
4340 framebuffer_size,
4341 framebuffer_depth_is_ready,
4342 clear_color,
4343 &frame.render_tasks,
4344 &projection,
4345 frame_id,
4346 stats,
4347 );
4348 }
4349
4350 (None, None)
4351 }
4352 RenderPassKind::OffScreen { ref mut alpha, ref mut color, ref mut texture_cache } => {
4353 let alpha_tex = self.allocate_target_texture(alpha, &mut frame.profile_counters, frame_id);
4354 let color_tex = self.allocate_target_texture(color, &mut frame.profile_counters, frame_id);
4355
4356 // If this frame has already been drawn, then any texture
4357 // cache targets have already been updated and can be
4358 // skipped this time.
4359 if !frame.has_been_rendered {
4360 for (&(texture_id, target_index), target) in texture_cache {
4361 self.draw_texture_cache_target(
4362 &texture_id,
4363 target_index,
4364 target,
4365 &frame.render_tasks,
4366 stats,
4367 );
4368 }
4369 }
4370
4371 for (target_index, target) in alpha.targets.iter().enumerate() {
4372 stats.alpha_target_count += 1;
4373
4374 let projection = Transform3D::ortho(
4375 0.0,
4376 alpha.max_size.width as f32,
4377 0.0,
4378 alpha.max_size.height as f32,
4379 ORTHO_NEAR_PLANE,
4380 ORTHO_FAR_PLANE,
4381 );
4382
4383 self.draw_alpha_target(
4384 (&alpha_tex.as_ref().unwrap().texture, target_index as i32),
4385 target,
4386 alpha.max_size,
4387 &projection,
4388 &frame.render_tasks,
4389 stats,
4390 );
4391 }
4392
4393 for (target_index, target) in color.targets.iter().enumerate() {
4394 stats.color_target_count += 1;
4395
4396 let projection = Transform3D::ortho(
4397 0.0,
4398 color.max_size.width as f32,
4399 0.0,
4400 color.max_size.height as f32,
4401 ORTHO_NEAR_PLANE,
4402 ORTHO_FAR_PLANE,
4403 );
4404
4405 self.draw_color_target(
4406 Some((&color_tex.as_ref().unwrap().texture, target_index as i32)),
4407 target,
4408 frame.inner_rect,
4409 color.max_size,
4410 false,
4411 Some([0.0, 0.0, 0.0, 0.0]),
4412 &frame.render_tasks,
4413 &projection,
4414 frame_id,
4415 stats,
4416 );
4417 }
4418
4419 (alpha_tex, color_tex)
4420 }
4421 };
4422
4423 //Note: the `end_pass` will make sure this texture is not recycled this frame
4424 if let Some(ActiveTexture { ref texture, is_shared: true, .. }) = cur_alpha {
4425 self.device
4426 .bind_texture(TextureSampler::SharedCacheA8, texture);
4427 }
4428
4429 self.texture_resolver.end_pass(
4430 cur_alpha,
4431 cur_color,
4432 );
4433 }
4434
4435 self.texture_resolver.end_frame();
4436 if let Some(framebuffer_size) = framebuffer_size {
4437 self.draw_render_target_debug(framebuffer_size);
4438 self.draw_texture_cache_debug(framebuffer_size);
4439 }
4440 self.draw_epoch_debug();
4441
4442 // Garbage collect any frame outputs that weren't used this frame.
4443 let device = &mut self.device;
4444 self.output_targets
4445 .retain(|_, target| if target.last_access != frame_id {
4446 device.delete_fbo(target.fbo_id);
4447 false
4448 } else {
4449 true
4450 });
4451
4452 frame.has_been_rendered = true;
4453 }
4454
debug_renderer<'b>(&'b mut self) -> &'b mut DebugRenderer4455 pub fn debug_renderer<'b>(&'b mut self) -> &'b mut DebugRenderer {
4456 &mut self.debug
4457 }
4458
get_debug_flags(&self) -> DebugFlags4459 pub fn get_debug_flags(&self) -> DebugFlags {
4460 self.debug_flags
4461 }
4462
set_debug_flags(&mut self, flags: DebugFlags)4463 pub fn set_debug_flags(&mut self, flags: DebugFlags) {
4464 if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_TIME_QUERIES) {
4465 if enabled {
4466 self.gpu_profile.enable_timers();
4467 } else {
4468 self.gpu_profile.disable_timers();
4469 }
4470 }
4471 if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_SAMPLE_QUERIES) {
4472 if enabled {
4473 self.gpu_profile.enable_samplers();
4474 } else {
4475 self.gpu_profile.disable_samplers();
4476 }
4477 }
4478
4479 self.debug_flags = flags;
4480 }
4481
set_debug_flag(&mut self, flag: DebugFlags, enabled: bool)4482 pub fn set_debug_flag(&mut self, flag: DebugFlags, enabled: bool) {
4483 let mut new_flags = self.debug_flags;
4484 new_flags.set(flag, enabled);
4485 self.set_debug_flags(new_flags);
4486 }
4487
toggle_debug_flags(&mut self, toggle: DebugFlags)4488 pub fn toggle_debug_flags(&mut self, toggle: DebugFlags) {
4489 let mut new_flags = self.debug_flags;
4490 new_flags.toggle(toggle);
4491 self.set_debug_flags(new_flags);
4492 }
4493
save_cpu_profile(&self, filename: &str)4494 pub fn save_cpu_profile(&self, filename: &str) {
4495 write_profile(filename);
4496 }
4497
draw_render_target_debug(&mut self, framebuffer_size: DeviceUintSize)4498 fn draw_render_target_debug(&mut self, framebuffer_size: DeviceUintSize) {
4499 if !self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
4500 return;
4501 }
4502
4503 let mut spacing = 16;
4504 let mut size = 512;
4505 let fb_width = framebuffer_size.width as i32;
4506 let num_layers: i32 = self.texture_resolver.render_target_pool
4507 .iter()
4508 .map(|texture| texture.get_render_target_layer_count() as i32)
4509 .sum();
4510
4511 if num_layers * (size + spacing) > fb_width {
4512 let factor = fb_width as f32 / (num_layers * (size + spacing)) as f32;
4513 size = (size as f32 * factor) as i32;
4514 spacing = (spacing as f32 * factor) as i32;
4515 }
4516
4517 let mut target_index = 0;
4518 for texture in &self.texture_resolver.render_target_pool {
4519 let dimensions = texture.get_dimensions();
4520 let src_rect = DeviceIntRect::new(DeviceIntPoint::zero(), dimensions.to_i32());
4521
4522 let layer_count = texture.get_render_target_layer_count();
4523 for layer_index in 0 .. layer_count {
4524 self.device
4525 .bind_read_target(Some((texture, layer_index as i32)));
4526 let x = fb_width - (spacing + size) * (target_index + 1);
4527 let y = spacing;
4528
4529 let dest_rect = rect(x, y, size, size);
4530 self.device.blit_render_target(src_rect, dest_rect);
4531 target_index += 1;
4532 }
4533 }
4534 }
4535
draw_texture_cache_debug(&mut self, framebuffer_size: DeviceUintSize)4536 fn draw_texture_cache_debug(&mut self, framebuffer_size: DeviceUintSize) {
4537 if !self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
4538 return;
4539 }
4540
4541 let mut spacing = 16;
4542 let mut size = 512;
4543 let fb_width = framebuffer_size.width as i32;
4544 let num_layers: i32 = self.texture_resolver
4545 .cache_texture_map
4546 .iter()
4547 .map(|texture| texture.get_layer_count())
4548 .sum();
4549
4550 if num_layers * (size + spacing) > fb_width {
4551 let factor = fb_width as f32 / (num_layers * (size + spacing)) as f32;
4552 size = (size as f32 * factor) as i32;
4553 spacing = (spacing as f32 * factor) as i32;
4554 }
4555
4556 let mut i = 0;
4557 for texture in &self.texture_resolver.cache_texture_map {
4558 let y = spacing + if self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
4559 528
4560 } else {
4561 0
4562 };
4563 let dimensions = texture.get_dimensions();
4564 let src_rect = DeviceIntRect::new(
4565 DeviceIntPoint::zero(),
4566 DeviceIntSize::new(dimensions.width as i32, dimensions.height as i32),
4567 );
4568
4569 let layer_count = texture.get_layer_count();
4570 for layer_index in 0 .. layer_count {
4571 self.device.bind_read_target(Some((texture, layer_index)));
4572
4573 let x = fb_width - (spacing + size) * (i as i32 + 1);
4574
4575 // If we have more targets than fit on one row in screen, just early exit.
4576 if x > fb_width {
4577 return;
4578 }
4579
4580 let dest_rect = rect(x, y, size, size);
4581 self.device.blit_render_target(src_rect, dest_rect);
4582 i += 1;
4583 }
4584 }
4585 }
4586
draw_epoch_debug(&mut self)4587 fn draw_epoch_debug(&mut self) {
4588 if !self.debug_flags.contains(DebugFlags::EPOCHS) {
4589 return;
4590 }
4591
4592 let dy = self.debug.line_height();
4593 let x0: f32 = 30.0;
4594 let y0: f32 = 30.0;
4595 let mut y = y0;
4596 let mut text_width = 0.0;
4597 for (pipeline, epoch) in &self.pipeline_info.epochs {
4598 y += dy;
4599 let w = self.debug.add_text(
4600 x0, y,
4601 &format!("{:?}: {:?}", pipeline, epoch),
4602 ColorU::new(255, 255, 0, 255),
4603 ).size.width;
4604 text_width = f32::max(text_width, w);
4605 }
4606
4607 let margin = 10.0;
4608 self.debug.add_quad(
4609 &x0 - margin,
4610 y0 - margin,
4611 x0 + text_width + margin,
4612 y + margin,
4613 ColorU::new(25, 25, 25, 200),
4614 ColorU::new(51, 51, 51, 200),
4615 );
4616 }
4617
    /// Pass-through to `Device::read_pixels_into`, used by Gecko's WR bindings.
    ///
    /// NOTE(review): `output` is presumably expected to be large enough for
    /// `rect` in the requested `format` — see `Device::read_pixels_into`.
    pub fn read_pixels_into(&mut self, rect: DeviceUintRect, format: ReadPixelsFormat, output: &mut [u8]) {
        self.device.read_pixels_into(rect, format, output);
    }
4622
read_pixels_rgba8(&mut self, rect: DeviceUintRect) -> Vec<u8>4623 pub fn read_pixels_rgba8(&mut self, rect: DeviceUintRect) -> Vec<u8> {
4624 let mut pixels = vec![0; (rect.size.width * rect.size.height * 4) as usize];
4625 self.device.read_pixels_into(rect, ReadPixelsFormat::Rgba8, &mut pixels);
4626 pixels
4627 }
4628
read_gpu_cache(&mut self) -> (DeviceUintSize, Vec<u8>)4629 pub fn read_gpu_cache(&mut self) -> (DeviceUintSize, Vec<u8>) {
4630 let size = self.gpu_cache_texture.texture.get_dimensions();
4631 let mut texels = vec![0; (size.width * size.height * 16) as usize];
4632 self.device.begin_frame();
4633 self.device.bind_read_target(Some((&self.gpu_cache_texture.texture, 0)));
4634 self.device.read_pixels_into(
4635 DeviceUintRect::new(DeviceUintPoint::zero(), size),
4636 ReadPixelsFormat::Standard(ImageFormat::RGBAF32),
4637 &mut texels,
4638 );
4639 self.device.bind_read_target(None);
4640 self.device.end_frame();
4641 (size, texels)
4642 }
4643
    /// De-initialize the Renderer safely, assuming the GL context is still
    /// alive and active. Consumes `self` and deletes every GPU resource the
    /// renderer owns: textures, PBOs, VAOs, FBOs and all shader programs.
    pub fn deinit(mut self) {
        //Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
        self.device.begin_frame();
        // Textures and buffers first.
        self.gpu_cache_texture.deinit(&mut self.device);
        if let Some(dither_matrix_texture) = self.dither_matrix_texture {
            self.device.delete_texture(dither_matrix_texture);
        }
        self.node_data_texture.deinit(&mut self.device);
        self.local_clip_rects_texture.deinit(&mut self.device);
        self.render_task_texture.deinit(&mut self.device);
        self.device.delete_pbo(self.texture_cache_upload_pbo);
        self.texture_resolver.deinit(&mut self.device);
        self.device.delete_vao(self.prim_vao);
        self.device.delete_vao(self.clip_vao);
        self.device.delete_vao(self.blur_vao);
        self.debug.deinit(&mut self.device);
        // Then every shader program.
        self.cs_text_run.deinit(&mut self.device);
        self.cs_blur_a8.deinit(&mut self.device);
        self.cs_blur_rgba8.deinit(&mut self.device);
        self.brush_solid.deinit(&mut self.device);
        self.brush_line.deinit(&mut self.device);
        self.brush_blend.deinit(&mut self.device);
        self.brush_mix_blend.deinit(&mut self.device);
        self.brush_radial_gradient.deinit(&mut self.device);
        self.brush_linear_gradient.deinit(&mut self.device);
        self.cs_clip_rectangle.deinit(&mut self.device);
        self.cs_clip_box_shadow.deinit(&mut self.device);
        self.cs_clip_image.deinit(&mut self.device);
        self.cs_clip_border.deinit(&mut self.device);
        self.ps_text_run.deinit(&mut self.device);
        self.ps_text_run_dual_source.deinit(&mut self.device);
        // Image/YUV shaders are lazily compiled, so only some slots are Some.
        for shader in self.brush_image {
            if let Some(shader) = shader {
                shader.deinit(&mut self.device);
            }
        }
        for shader in self.ps_image {
            if let Some(shader) = shader {
                shader.deinit(&mut self.device);
            }
        }
        for shader in self.brush_yuv_image {
            if let Some(shader) = shader {
                shader.deinit(&mut self.device);
            }
        }
        for (_, target) in self.output_targets {
            self.device.delete_fbo(target.fbo_id);
        }
        self.ps_border_corner.deinit(&mut self.device);
        self.ps_border_edge.deinit(&mut self.device);
        self.ps_hw_composite.deinit(&mut self.device);
        self.ps_split_composite.deinit(&mut self.device);
        #[cfg(feature = "capture")]
        self.device.delete_fbo(self.read_fbo);
        #[cfg(feature = "replay")]
        for (_, ext) in self.owned_external_images {
            self.device.delete_external_texture(ext);
        }
        self.device.end_frame();
    }
4706 }
4707
/// The pixel source of a locked external image, as provided by the client's
/// `ExternalImageHandler`.
pub enum ExternalImageSource<'a> {
    /// A raw CPU-side pixel buffer, borrowed for the duration of the lock.
    RawData(&'a [u8]),
    /// A native texture handle (a `gl::GLuint`).
    NativeTexture(u32),
    /// The handler could not provide the image; the renderer falls back to
    /// GL handle 0 (see `update_deferred_resolves`).
    Invalid,
}
4713
/// The data that an external client should provide about
/// an external image. The timestamp is used to test if
/// the renderer should upload new texture data this
/// frame. For instance, if providing video frames, the
/// application could call wr.render() whenever a new
/// video frame is ready. If the callback increments
/// the returned timestamp for a given image, the renderer
/// will know to re-upload the image data to the GPU.
/// Note that the UV coords are supplied in texel-space!
pub struct ExternalImage<'a> {
    /// The image's UV rect within its texture, in texels.
    pub uv: TexelRect,
    /// Where the pixels live: a raw buffer or a native texture handle.
    pub source: ExternalImageSource<'a>,
}
4727
/// The interfaces that an application can implement to support providing
/// external image buffers.
/// When the application passes an external image to WR, it should keep that
/// external image alive. People could check the epoch id in RenderNotifier
/// at the client side to make sure that the external image is not used by WR.
/// Then, do the clean up for that external image.
pub trait ExternalImageHandler {
    /// Lock the external image. Then, WR could start to read the image content.
    /// The WR client should not change the image content until the unlock()
    /// call.
    fn lock(&mut self, key: ExternalImageId, channel_index: u8) -> ExternalImage;
    /// Unlock the external image. The WR should not read the image content
    /// after this call.
    fn unlock(&mut self, key: ExternalImageId, channel_index: u8);
}
4743
/// Allows callers to receive a texture with the contents of a specific
/// pipeline copied to it. Lock should return the native texture handle
/// and the size of the texture. Unlock will only be called if the lock()
/// call succeeds, when WR has issued the GL commands to copy the output
/// to the texture handle.
pub trait OutputImageHandler {
    /// Returns `(native texture handle, texture size)` for the pipeline's
    /// output, or `None` if no output texture is available.
    fn lock(&mut self, pipeline_id: PipelineId) -> Option<(u32, DeviceIntSize)>;
    /// Called after WR has issued the copy commands for a successful lock.
    fn unlock(&mut self, pipeline_id: PipelineId);
}
4753
/// Callbacks fired when webrender starts or stops one of its internal
/// threads, e.g. so the embedder can register them with a profiler.
pub trait ThreadListener {
    /// A webrender-owned thread named `thread_name` has started.
    fn thread_started(&self, thread_name: &str);
    /// A webrender-owned thread named `thread_name` is stopping.
    fn thread_stopped(&self, thread_name: &str);
}
4758
/// Construction-time configuration for a `Renderer`.
///
/// Prefer starting from `RendererOptions::default()` and overriding just the
/// fields you need.
pub struct RendererOptions {
    pub device_pixel_ratio: f32,
    pub resource_override_path: Option<PathBuf>,
    pub enable_aa: bool,
    pub enable_dithering: bool,
    pub max_recorded_profiles: usize,
    pub debug: bool,
    pub enable_scrollbars: bool,
    pub precache_shaders: bool,
    pub renderer_kind: RendererKind,
    pub enable_subpixel_aa: bool,
    pub clear_color: Option<ColorF>,
    pub enable_clear_scissor: bool,
    pub max_texture_size: Option<u32>,
    pub scatter_gpu_cache_updates: bool,
    pub upload_method: UploadMethod,
    pub workers: Option<Arc<ThreadPool>>,
    pub blob_image_renderer: Option<Box<BlobImageRenderer>>,
    pub recorder: Option<Box<ApiRecordingReceiver>>,
    pub thread_listener: Option<Box<ThreadListener + Send + Sync>>,
    pub enable_render_on_scroll: bool,
    pub cached_programs: Option<Rc<ProgramCache>>,
    pub debug_flags: DebugFlags,
    pub renderer_id: Option<u64>,
    pub disable_dual_source_blending: bool,
}
4785
4786 impl Default for RendererOptions {
default() -> Self4787 fn default() -> Self {
4788 RendererOptions {
4789 device_pixel_ratio: 1.0,
4790 resource_override_path: None,
4791 enable_aa: true,
4792 enable_dithering: true,
4793 debug_flags: DebugFlags::empty(),
4794 max_recorded_profiles: 0,
4795 debug: false,
4796 enable_scrollbars: false,
4797 precache_shaders: false,
4798 renderer_kind: RendererKind::Native,
4799 enable_subpixel_aa: false,
4800 clear_color: Some(ColorF::new(1.0, 1.0, 1.0, 1.0)),
4801 enable_clear_scissor: true,
4802 max_texture_size: None,
4803 // Scattered GPU cache updates haven't met a test that would show their superiority yet.
4804 scatter_gpu_cache_updates: false,
4805 // This is best as `Immediate` on Angle, or `Pixelbuffer(Dynamic)` on GL,
4806 // but we are unable to make this decision here, so picking the reasonable medium.
4807 upload_method: UploadMethod::PixelBuffer(VertexUsageHint::Stream),
4808 workers: None,
4809 blob_image_renderer: None,
4810 recorder: None,
4811 thread_listener: None,
4812 enable_render_on_scroll: true,
4813 renderer_id: None,
4814 cached_programs: None,
4815 disable_dual_source_blending: false,
4816 }
4817 }
4818 }
4819
/// No-op stand-in for the debug server when the "debugger" feature is off.
#[cfg(not(feature = "debugger"))]
pub struct DebugServer;
4822
#[cfg(not(feature = "debugger"))]
impl DebugServer {
    /// Mirrors the real constructor's signature; the sender is unused.
    pub fn new(_: MsgSender<ApiMsg>) -> Self {
        DebugServer
    }

    /// Messages are silently dropped when the debugger is compiled out.
    pub fn send(&mut self, _: String) {}
}
4831
/// Some basic statistics about the rendered scene
/// that we can use in wrench reftests to ensure that
/// tests are batching and/or allocating on render
/// targets as we expect them to.
pub struct RendererStats {
    pub total_draw_calls: usize,
    pub alpha_target_count: usize,
    pub color_target_count: usize,
}
4841
impl RendererStats {
    /// A stats block with every counter zeroed.
    pub fn empty() -> Self {
        RendererStats {
            total_draw_calls: 0,
            alpha_target_count: 0,
            color_target_count: 0,
        }
    }
}
4851
4852
4853
/// Serializable description of a texture for capture/replay. The raw pixel
/// payload is stored in a side file referenced by `data`.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainTexture {
    // Relative path of the raw pixel file (e.g. "textures/{name}.raw").
    data: String,
    // (width, height, layer count).
    size: (u32, u32, i32),
    format: ImageFormat,
    filter: TextureFilter,
    render_target: Option<RenderTargetInfo>,
}
4864
4865
/// Serializable snapshot of the renderer-owned GPU state for capture/replay.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainRenderer {
    gpu_cache: PlainTexture,
    gpu_cache_frame_id: FrameId,
    textures: Vec<PlainTexture>,
    external_images: Vec<ExternalCaptureImage>
}
4875
/// Payload of an external image reloaded from a capture during replay.
#[cfg(feature = "replay")]
enum CapturedExternalImageData {
    // A GL texture that was recreated from the captured pixels.
    NativeTexture(gl::GLuint),
    // Raw captured bytes, shared so they can be served on every lock().
    Buffer(Arc<Vec<u8>>),
}
4881
/// External image handler used during capture replay: serves previously
/// captured image payloads keyed by (image id, channel index).
#[cfg(feature = "replay")]
struct DummyExternalImageHandler {
    data: FastHashMap<(ExternalImageId, u8), (CapturedExternalImageData, TexelRect)>,
}
4886
#[cfg(feature = "replay")]
impl ExternalImageHandler for DummyExternalImageHandler {
    /// Serve the captured payload recorded for this (id, channel) pair.
    /// Panics if the capture contains no entry for the key.
    fn lock(&mut self, key: ExternalImageId, channel_index: u8) -> ExternalImage {
        let entry = &self.data[&(key, channel_index)];
        let source = match entry.0 {
            CapturedExternalImageData::NativeTexture(tid) => ExternalImageSource::NativeTexture(tid),
            CapturedExternalImageData::Buffer(ref arc) => ExternalImageSource::RawData(&*arc),
        };
        ExternalImage {
            uv: entry.1,
            source,
        }
    }
    /// Captured payloads have no live backing resource, so this is a no-op.
    fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {}
}
4901
/// Dummy output handler used during capture replay: no pipeline output is
/// ever locked, so `unlock` must never be reached.
#[cfg(feature = "replay")]
impl OutputImageHandler for () {
    fn lock(&mut self, _: PipelineId) -> Option<(u32, DeviceIntSize)> {
        None
    }
    fn unlock(&mut self, _: PipelineId) {
        unreachable!()
    }
}
4911
/// Per-frame pipeline bookkeeping: the latest epoch for each pipeline, plus
/// the pipelines that were removed this frame.
#[derive(Default)]
pub struct PipelineInfo {
    pub epochs: FastHashMap<PipelineId, Epoch>,
    pub removed_pipelines: Vec<PipelineId>,
}
4917
4918 impl Renderer {
4919 #[cfg(feature = "capture")]
save_texture( texture: &Texture, name: &str, root: &PathBuf, device: &mut Device ) -> PlainTexture4920 fn save_texture(
4921 texture: &Texture, name: &str, root: &PathBuf, device: &mut Device
4922 ) -> PlainTexture {
4923 use std::fs;
4924 use std::io::Write;
4925
4926 let short_path = format!("textures/{}.raw", name);
4927
4928 let bytes_per_pixel = texture.get_format().bytes_per_pixel();
4929 let read_format = ReadPixelsFormat::Standard(texture.get_format());
4930 let rect = DeviceUintRect::new(
4931 DeviceUintPoint::zero(),
4932 texture.get_dimensions(),
4933 );
4934
4935 let mut file = fs::File::create(root.join(&short_path))
4936 .expect(&format!("Unable to create {}", short_path));
4937 let bytes_per_layer = (rect.size.width * rect.size.height * bytes_per_pixel) as usize;
4938 let mut data = vec![0; bytes_per_layer];
4939
4940 //TODO: instead of reading from an FBO with `read_pixels*`, we could
4941 // read from textures directly with `get_tex_image*`.
4942
4943 for layer_id in 0 .. texture.get_layer_count() {
4944 device.attach_read_texture(texture, layer_id);
4945 #[cfg(feature = "png")]
4946 {
4947 let mut png_data;
4948 let (data_ref, format) = match texture.get_format() {
4949 ImageFormat::RGBAF32 => {
4950 png_data = vec![0; (rect.size.width * rect.size.height * 4) as usize];
4951 device.read_pixels_into(rect, ReadPixelsFormat::Rgba8, &mut png_data);
4952 (&png_data, ReadPixelsFormat::Rgba8)
4953 }
4954 fm => (&data, ReadPixelsFormat::Standard(fm)),
4955 };
4956 CaptureConfig::save_png(
4957 root.join(format!("textures/{}-{}.png", name, layer_id)),
4958 (rect.size.width, rect.size.height), format,
4959 data_ref,
4960 );
4961 }
4962 device.read_pixels_into(rect, read_format, &mut data);
4963 file.write_all(&data)
4964 .unwrap();
4965 }
4966
4967 PlainTexture {
4968 data: short_path,
4969 size: (rect.size.width, rect.size.height, texture.get_layer_count()),
4970 format: texture.get_format(),
4971 filter: texture.get_filter(),
4972 render_target: texture.get_render_target(),
4973 }
4974 }
4975
    /// Load the raw texel data described by `plain` from disk and
    /// (re)initialize `texture` with it. Returns the texel bytes so the
    /// caller can keep them around if needed. Panics if the file is missing
    /// or the stored format doesn't match the texture's.
    #[cfg(feature = "replay")]
    fn load_texture(texture: &mut Texture, plain: &PlainTexture, root: &PathBuf, device: &mut Device) -> Vec<u8> {
        use std::fs::File;
        use std::io::Read;

        let mut texels = Vec::new();
        assert_eq!(plain.format, texture.get_format());
        File::open(root.join(&plain.data))
            .expect(&format!("Unable to open texture at {}", plain.data))
            .read_to_end(&mut texels)
            .unwrap();

        device.init_texture(
            texture, plain.size.0, plain.size.1,
            plain.filter, plain.render_target,
            plain.size.2, Some(texels.as_slice()),
        );

        texels
    }
4996
    /// Serializes the renderer's GPU-side state into the capture directory at
    /// `config.root`: the payloads of any deferred external images, and — when
    /// `CaptureBits::FRAME` is set — the GPU cache texture plus every cached
    /// texture from the texture resolver.
    #[cfg(feature = "capture")]
    fn save_capture(
        &mut self,
        config: CaptureConfig,
        deferred_images: Vec<ExternalCaptureImage>,
    ) {
        use std::fs;
        use std::io::Write;
        use api::{CaptureBits, ExternalImageData};

        // All texture reads below go through the dedicated read FBO.
        self.device.begin_frame();
        let _gm = self.gpu_profile.start_marker("read GPU data");
        self.device.bind_read_target_impl(self.read_fbo);

        if !deferred_images.is_empty() {
            info!("saving external images");
            // De-duplication maps: several deferred images may alias the same
            // raw buffer (keyed by its pointer) or the same native GL texture
            // (keyed by its GL id); each payload is written to disk only once.
            let mut arc_map = FastHashMap::<*const u8, String>::default();
            let mut tex_map = FastHashMap::<u32, String>::default();
            let handler = self.external_image_handler
                .as_mut()
                .expect("Unable to lock the external image handler!");
            for def in &deferred_images {
                info!("\t{}", def.short_path);
                let ExternalImageData { id, channel_index, image_type } = def.external;
                // Lock the image to get access to its source; every lock is
                // paired with an unlock in the loop after this one.
                let ext_image = handler.lock(id, channel_index);
                // `data` is Some(bytes) only when this payload has not been
                // saved yet; `short_path` is the capture-relative file name.
                let (data, short_path) = match ext_image.source {
                    ExternalImageSource::RawData(data) => {
                        let arc_id = arc_map.len() + 1;
                        match arc_map.entry(data.as_ptr()) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let short_path = format!("externals/d{}.raw", arc_id);
                                (Some(data.to_vec()), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::NativeTexture(gl_id) => {
                        let tex_id = tex_map.len() + 1;
                        match tex_map.entry(gl_id) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                // Buffer-typed images can't reach this arm:
                                // they carry `RawData`, not a native texture.
                                let target = match image_type {
                                    ExternalImageType::TextureHandle(target) => target,
                                    ExternalImageType::Buffer => unreachable!(),
                                };
                                info!("\t\tnative texture of target {:?}", target);
                                let layer_index = 0; //TODO: what about layered textures?
                                // Read the texture contents back via the FBO.
                                self.device.attach_read_texture_external(gl_id, target, layer_index);
                                let data = self.device.read_pixels(&def.descriptor);
                                let short_path = format!("externals/t{}.raw", tex_id);
                                (Some(data), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::Invalid => {
                        info!("\t\tinvalid source!");
                        (None, String::new())
                    }
                };
                if let Some(bytes) = data {
                    fs::File::create(config.root.join(&short_path))
                        .expect(&format!("Unable to create {}", short_path))
                        .write_all(&bytes)
                        .unwrap();
                }
                // Serialize the per-image metadata (payload path + uv rect)
                // next to the payload, under the image's own short path.
                let plain = PlainExternalImage {
                    data: short_path,
                    id: def.external.id,
                    channel_index: def.external.channel_index,
                    uv: ext_image.uv,
                };
                config.serialize(&plain, &def.short_path);
            }
            // Release every image locked above.
            for def in &deferred_images {
                handler.unlock(def.external.id, def.external.channel_index);
            }
        }

        if config.bits.contains(CaptureBits::FRAME) {
            let path_textures = config.root.join("textures");
            if !path_textures.is_dir() {
                fs::create_dir(&path_textures).unwrap();
            }

            info!("saving GPU cache");
            self.update_gpu_cache(); // flush pending updates
            let mut plain_self = PlainRenderer {
                gpu_cache: Self::save_texture(
                    &self.gpu_cache_texture.texture,
                    "gpu", &config.root, &mut self.device,
                ),
                gpu_cache_frame_id: self.gpu_cache_frame_id,
                textures: Vec::new(),
                external_images: deferred_images,
            };

            info!("saving cached textures");
            for texture in &self.texture_resolver.cache_texture_map {
                let file_name = format!("cache-{}", plain_self.textures.len() + 1);
                info!("\t{}", file_name);
                let plain = Self::save_texture(texture, &file_name, &config.root, &mut self.device);
                plain_self.textures.push(plain);
            }

            config.serialize(&plain_self, "renderer");
        }

        self.device.bind_read_target(None);
        self.device.end_frame();
        info!("done.");
    }
5112
    /// Restores renderer state from a capture at `root`: populates a dummy
    /// external image handler from `plain_externals` (buffer-backed images),
    /// and — if a serialized `PlainRenderer` is present — reloads the cached
    /// textures, the GPU cache texture, and any texture-backed external images.
    #[cfg(feature = "replay")]
    fn load_capture(
        &mut self, root: PathBuf, plain_externals: Vec<PlainExternalImage>
    ) {
        use std::fs::File;
        use std::io::Read;
        use std::slice;

        info!("loading external buffer-backed images");
        assert!(self.texture_resolver.external_images.is_empty());
        // Cache of payload file path -> shared bytes, so files referenced by
        // multiple images are read from disk only once.
        let mut raw_map = FastHashMap::<String, Arc<Vec<u8>>>::default();
        let mut image_handler = DummyExternalImageHandler {
            data: FastHashMap::default(),
        };
        // Note: this is a `SCENE` level population of the external image handlers
        // It would put both external buffers and texture into the map.
        // But latter are going to be overwritten later in this function
        // if we are in the `FRAME` level.
        for plain_ext in plain_externals {
            let data = match raw_map.entry(plain_ext.data) {
                Entry::Occupied(e) => e.get().clone(),
                Entry::Vacant(e) => {
                    let mut buffer = Vec::new();
                    File::open(root.join(e.key()))
                        .expect(&format!("Unable to open {}", e.key()))
                        .read_to_end(&mut buffer)
                        .unwrap();
                    e.insert(Arc::new(buffer)).clone()
                }
            };
            let key = (plain_ext.id, plain_ext.channel_index);
            let value = (CapturedExternalImageData::Buffer(data), plain_ext.uv);
            image_handler.data.insert(key, value);
        }

        // FRAME-level data: only present if the capture serialized a renderer.
        if let Some(renderer) = CaptureConfig::deserialize::<PlainRenderer, _>(&root, "renderer") {
            info!("loading cached textures");
            self.device.begin_frame();

            // Drop the current cache textures before recreating them from the
            // capture, so GPU resources are not leaked.
            for texture in self.texture_resolver.cache_texture_map.drain(..) {
                self.device.delete_texture(texture);
            }
            for texture in renderer.textures {
                info!("\t{}", texture.data);
                let mut t = self.device.create_texture(TextureTarget::Array, texture.format);
                Self::load_texture(&mut t, &texture, &root, &mut self.device);
                self.texture_resolver.cache_texture_map.push(t);
            }

            info!("loading gpu cache");
            let gpu_cache_data = Self::load_texture(
                &mut self.gpu_cache_texture.texture,
                &renderer.gpu_cache,
                &root,
                &mut self.device,
            );
            match self.gpu_cache_texture.bus {
                CacheBus::PixelBuffer { ref mut rows, ref mut cpu_blocks, .. } => {
                    let dim = self.gpu_cache_texture.texture.get_dimensions();
                    // Reinterpret the raw bytes read from disk as GPU blocks;
                    // any trailing bytes smaller than one block are dropped by
                    // the length division.
                    // NOTE(review): this assumes the Vec<u8> allocation is
                    // sufficiently aligned for `GpuBlockData` — confirm, since
                    // `slice::from_raw_parts` requires an aligned pointer.
                    let blocks = unsafe {
                        slice::from_raw_parts(
                            gpu_cache_data.as_ptr() as *const GpuBlockData,
                            gpu_cache_data.len() / mem::size_of::<GpuBlockData>(),
                        )
                    };
                    // fill up the CPU cache from the contents we just loaded
                    rows.clear();
                    cpu_blocks.clear();
                    rows.extend((0 .. dim.height).map(|_| CacheRow::new()));
                    cpu_blocks.extend_from_slice(blocks);
                }
                CacheBus::Scatter { .. } => {}
            }
            self.gpu_cache_frame_id = renderer.gpu_cache_frame_id;

            info!("loading external texture-backed images");
            // Payload file path -> GL texture id, so textures shared between
            // several external images are created only once.
            let mut native_map = FastHashMap::<String, gl::GLuint>::default();
            for ExternalCaptureImage { short_path, external, descriptor } in renderer.external_images {
                // Buffer-backed images were already handled in the loop above.
                let target = match external.image_type {
                    ExternalImageType::TextureHandle(target) => target,
                    ExternalImageType::Buffer => continue,
                };
                let plain_ext = CaptureConfig::deserialize::<PlainExternalImage, _>(&root, &short_path)
                    .expect(&format!("Unable to read {}.ron", short_path));
                let key = (external.id, external.channel_index);

                let tid = match native_map.entry(plain_ext.data) {
                    Entry::Occupied(e) => e.get().clone(),
                    Entry::Vacant(e) => {
                        //TODO: provide a way to query both the layer count and the filter from external images
                        let (layer_count, filter) = (1, TextureFilter::Linear);
                        let plain_tex = PlainTexture {
                            data: e.key().clone(),
                            size: (descriptor.width, descriptor.height, layer_count),
                            format: descriptor.format,
                            filter,
                            render_target: None,
                        };
                        let mut t = self.device.create_texture(target, plain_tex.format);
                        Self::load_texture(&mut t, &plain_tex, &root, &mut self.device);
                        // Keep ownership of the recreated texture so it can be
                        // cleaned up later; hand its id to the image handler.
                        let extex = t.into_external();
                        self.owned_external_images.insert(key, extex.clone());
                        e.insert(extex.internal_id()).clone()
                    }
                };

                // This overwrites any SCENE-level buffer entry for the same key.
                let value = (CapturedExternalImageData::NativeTexture(tid), plain_ext.uv);
                image_handler.data.insert(key, value);
            }

            self.device.end_frame();
        }

        // Install the dummy handlers that serve the captured data.
        self.output_image_handler = Some(Box::new(()) as Box<_>);
        self.external_image_handler = Some(Box::new(image_handler) as Box<_>);
        info!("done.");
    }
5230 }
5231