/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include rect,render_task,gpu_cache,snap,transform

#define EXTEND_MODE_CLAMP  0
#define EXTEND_MODE_REPEAT 1

#define SUBPX_DIR_NONE        0
#define SUBPX_DIR_HORIZONTAL  1
#define SUBPX_DIR_VERTICAL    2
#define SUBPX_DIR_MIXED       3

#define RASTER_LOCAL            0
#define RASTER_SCREEN           1

uniform sampler2DArray sPrevPassAlpha;
uniform sampler2DArray sPrevPassColor;

vec2 clamp_rect(vec2 pt, RectWithSize rect) {
    return clamp(pt, rect.p0, rect.p0 + rect.size);
}

// TODO: convert back to RectWithEndPoint if driver issues are resolved, if ever.
flat varying vec4 vClipMaskUvBounds;
// XY and W are homogeneous coordinates, Z is the layer index
varying vec4 vClipMaskUv;


#ifdef WR_VERTEX_SHADER

#define COLOR_MODE_FROM_PASS          0
#define COLOR_MODE_ALPHA              1
#define COLOR_MODE_SUBPX_CONST_COLOR  2
#define COLOR_MODE_SUBPX_BG_PASS0     3
#define COLOR_MODE_SUBPX_BG_PASS1     4
#define COLOR_MODE_SUBPX_BG_PASS2     5
#define COLOR_MODE_SUBPX_DUAL_SOURCE  6
#define COLOR_MODE_BITMAP             7
#define COLOR_MODE_COLOR_BITMAP       8
#define COLOR_MODE_IMAGE              9

uniform HIGHP_SAMPLER_FLOAT sampler2D sPrimitiveHeadersF;
uniform HIGHP_SAMPLER_FLOAT isampler2D sPrimitiveHeadersI;

// Instanced attributes
in ivec4 aData;

#define VECS_PER_PRIM_HEADER_F 2U
#define VECS_PER_PRIM_HEADER_I 2U

struct PrimitiveHeader {
    RectWithSize local_rect;
    RectWithSize local_clip_rect;
    float z;
    int specific_prim_address;
    int render_task_index;
    int clip_task_index;
    int transform_id;
    ivec3 user_data;
};

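// Note: each primitive header occupies two vec4 texels in sPrimitiveHeadersF
// (local_rect, local_clip_rect) and two ivec4 texels in sPrimitiveHeadersI
// ((z, render_task_index, specific_prim_address, clip_task_index) and
// (transform_id, user_data.xyz)), matching the fetch order below.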
PrimitiveHeader fetch_prim_header(int index) {
    PrimitiveHeader ph;

    ivec2 uv_f = get_fetch_uv(index, VECS_PER_PRIM_HEADER_F);
    vec4 local_rect = TEXEL_FETCH(sPrimitiveHeadersF, uv_f, 0, ivec2(0, 0));
    vec4 local_clip_rect = TEXEL_FETCH(sPrimitiveHeadersF, uv_f, 0, ivec2(1, 0));
    ph.local_rect = RectWithSize(local_rect.xy, local_rect.zw);
    ph.local_clip_rect = RectWithSize(local_clip_rect.xy, local_clip_rect.zw);

    ivec2 uv_i = get_fetch_uv(index, VECS_PER_PRIM_HEADER_I);
    ivec4 data0 = TEXEL_FETCH(sPrimitiveHeadersI, uv_i, 0, ivec2(0, 0));
    ivec4 data1 = TEXEL_FETCH(sPrimitiveHeadersI, uv_i, 0, ivec2(1, 0));
    ph.z = float(data0.x);
    ph.render_task_index = data0.y;
    ph.specific_prim_address = data0.z;
    ph.clip_task_index = data0.w;
    ph.transform_id = data1.x;
    ph.user_data = data1.yzw;

    return ph;
}

struct VertexInfo {
    vec2 local_pos;
    vec2 snap_offset;
    vec4 world_pos;
};

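// Write the vertex position for a simple (non-transform) primitive:
// clamp the local position to the clip rect, compute a snap offset,
// transform to world space, then offset into the target render task.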
VertexInfo write_vertex(RectWithSize instance_rect,
                        RectWithSize local_clip_rect,
                        float z,
                        Transform transform,
                        PictureTask task,
                        RectWithSize snap_rect) {

    // Select the corner of the local rect that we are processing.
    vec2 local_pos = instance_rect.p0 + instance_rect.size * aPosition.xy;

    // Clamp to the local clip rect.
    vec2 clamped_local_pos = clamp_rect(local_pos, local_clip_rect);

    // Compute the visible rect to snap against. This ensures segments along the
    // edges are snapped consistently with other nearby primitives.
    RectWithSize visible_rect = intersect_rects(local_clip_rect, snap_rect);

    // Compute the snapping offset.
    vec2 snap_offset = compute_snap_offset(
        clamped_local_pos,
        transform.m,
        visible_rect,
        task.common_data.device_pixel_scale
    );

    // Transform the current vertex to world space.
    vec4 world_pos = transform.m * vec4(clamped_local_pos, 0.0, 1.0);

    // Convert the world positions to device pixel space.
    vec2 device_pos = world_pos.xy * task.common_data.device_pixel_scale;

    // Apply offsets for the render task to get the correct screen location.
    vec2 final_offset = snap_offset - task.content_origin + task.common_data.task_rect.p0;
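    // Note: final_offset is multiplied by world_pos.w below so that, after
    // the perspective divide, it acts as a constant screen-space offset.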

    gl_Position = uTransform * vec4(device_pos + final_offset * world_pos.w, z * world_pos.w, world_pos.w);

    VertexInfo vi = VertexInfo(
        clamped_local_pos,
        snap_offset,
        world_pos
    );

    return vi;
}

float cross2(vec2 v0, vec2 v1) {
    return v0.x * v1.y - v0.y * v1.x;
}

// Return intersection of line (p0,p1) and line (p2,p3)
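// This is the standard homogeneous-coordinates formulation: the line through
// (p0, p1) is (d0.y, -d0.x, cross2(p0, p1)), and the intersection of the two
// lines is their cross product, divided through by its last component
// d = cross2(d0, d1). Note: if the lines are parallel, d is zero and the
// result is undefined.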
vec2 intersect_lines(vec2 p0, vec2 p1, vec2 p2, vec2 p3) {
    vec2 d0 = p0 - p1;
    vec2 d1 = p2 - p3;

    float s0 = cross2(p0, p1);
    float s1 = cross2(p2, p3);

    float d = cross2(d0, d1);
    float nx = s0 * d1.x - d0.x * s1;
    float ny = s0 * d1.y - d0.y * s1;

    return vec2(nx / d, ny / d);
}

VertexInfo write_transform_vertex(RectWithSize local_segment_rect,
                                  RectWithSize local_prim_rect,
                                  RectWithSize local_clip_rect,
                                  vec4 clip_edge_mask,
                                  float z,
                                  Transform transform,
                                  PictureTask task) {
    // Convert the clip rect to endpoint form and clamp the segment rect to it.
    RectWithEndpoint clip_rect = to_rect_with_endpoint(local_clip_rect);
    RectWithEndpoint segment_rect = to_rect_with_endpoint(local_segment_rect);
    segment_rect.p0 = clamp(segment_rect.p0, clip_rect.p0, clip_rect.p1);
    segment_rect.p1 = clamp(segment_rect.p1, clip_rect.p0, clip_rect.p1);

    // Clamp the primitive rect to the same clip rect.
    RectWithEndpoint prim_rect = to_rect_with_endpoint(local_prim_rect);
    prim_rect.p0 = clamp(prim_rect.p0, clip_rect.p0, clip_rect.p1);
    prim_rect.p1 = clamp(prim_rect.p1, clip_rect.p0, clip_rect.p1);

    // As this is a transform shader, extrude by 2 (local space) pixels
    // in each direction. This gives enough space around the edge to
    // apply distance anti-aliasing. Technically, it:
    // (a) slightly over-estimates the number of required pixels in the simple case.
    // (b) might not provide enough edge in edge case perspective projections.
    // However, it's fast and simple. If / when we ever run into issues, we
    // can do some math on the projection matrix to work out a variable
    // amount to extrude.

    // Only extrude along edges where we are going to apply AA.
    float extrude_amount = 2.0;
    vec4 extrude_distance = vec4(extrude_amount) * clip_edge_mask;
    local_segment_rect.p0 -= extrude_distance.xy;
    local_segment_rect.size += extrude_distance.xy + extrude_distance.zw;

    // Select the corner of the local rect that we are processing.
    vec2 local_pos = local_segment_rect.p0 + local_segment_rect.size * aPosition.xy;

    // Compute the offset into the render task in device pixel space.
    vec2 task_offset = task.common_data.task_rect.p0 - task.content_origin;

    // Transform the current vertex to world space.
    vec4 world_pos = transform.m * vec4(local_pos, 0.0, 1.0);
    vec4 final_pos = vec4(
        world_pos.xy * task.common_data.device_pixel_scale + task_offset * world_pos.w,
        z * world_pos.w,
        world_pos.w
    );

    gl_Position = uTransform * final_pos;

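    // Set up the local rect used for edge AA in the fragment shader: for edges
    // with AA enabled (clip_edge_mask component == 1.0) use the clamped segment
    // rect edge, otherwise fall back to the clamped primitive rect edge.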
    init_transform_vs(mix(
        vec4(prim_rect.p0, prim_rect.p1),
        vec4(segment_rect.p0, segment_rect.p1),
        clip_edge_mask
    ));

    VertexInfo vi = VertexInfo(
        local_pos,
        vec2(0.0),
        world_pos
    );

    return vi;
}

void write_clip(vec4 world_pos, vec2 snap_offset, ClipArea area) {
    vec2 uv = world_pos.xy * area.common_data.device_pixel_scale +
        world_pos.w * (snap_offset + area.common_data.task_rect.p0 - area.screen_origin);
    vClipMaskUvBounds = vec4(
        area.common_data.task_rect.p0,
        area.common_data.task_rect.p0 + area.common_data.task_rect.size
    );
    vClipMaskUv = vec4(uv, area.common_data.texture_layer_index, world_pos.w);
}

// Read the extra image data containing the homogeneous screen space coordinates
// of the corners, interpolate between them, and return real screen space UV.
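// Note: st_tl/st_tr/st_bl/st_br are homogeneous (vec4) UVs of the quad
// corners; the bilinear mix is done before the perspective divide so the
// interpolation accounts for perspective across the quad.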
vec2 get_image_quad_uv(int address, vec2 f) {
    ImageResourceExtra extra_data = fetch_image_resource_extra(address);
    vec4 x = mix(extra_data.st_tl, extra_data.st_tr, f.x);
    vec4 y = mix(extra_data.st_bl, extra_data.st_br, f.x);
    vec4 z = mix(x, y, f.y);
    return z.xy / z.w;
}
#endif //WR_VERTEX_SHADER

#ifdef WR_FRAGMENT_SHADER

float do_clip() {
    // check for the dummy bounds, which are given to opaque objects
    if (vClipMaskUvBounds.xy == vClipMaskUvBounds.zw) {
        return 1.0;
    }
    // anything outside of the mask is considered transparent
    // Note: we assume gl_FragCoord.w == interpolated(1 / vClipMaskUv.w)
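    // Since vClipMaskUv.xy was written pre-multiplied by world_pos.w in
    // write_clip, multiplying by gl_FragCoord.w (the interpolated 1/w)
    // cancels the w and recovers the device-space mask UV for this fragment.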
    vec2 mask_uv = vClipMaskUv.xy * gl_FragCoord.w;
    bvec2 left = lessThanEqual(vClipMaskUvBounds.xy, mask_uv); // inclusive
    bvec2 right = greaterThan(vClipMaskUvBounds.zw, mask_uv); // non-inclusive
    // bail out if the pixel is outside the valid bounds
    if (!all(bvec4(left, right))) {
        return 0.0;
    }
    // finally, the slow path - fetch the mask value from an image
    // Note: Z is rounded to the nearest integer because the varying is
    // interpolated and therefore subject to precision-caused fluctuations,
    // see https://bugzilla.mozilla.org/show_bug.cgi?id=1491911
    ivec3 tc = ivec3(mask_uv, vClipMaskUv.z + 0.5);
    return texelFetch(sPrevPassAlpha, tc, 0).r;
}

#ifdef WR_FEATURE_DITHERING
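// Ordered dithering: sDither is assumed to hold an 8x8 Bayer matrix with
// values 0..63. noise_normalized recovers the matrix entry as a value in
// (0, 1), and the resulting noise amplitude is +/- 0.5/256, i.e. half of an
// 8-bit quantization step.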
vec4 dither(vec4 color) {
    const int matrix_mask = 7;

    ivec2 pos = ivec2(gl_FragCoord.xy) & ivec2(matrix_mask);
    float noise_normalized = (texelFetch(sDither, pos, 0).r * 255.0 + 0.5) / 64.0;
    float noise = (noise_normalized - 0.5) / 256.0; // scale down to the unit length

    return color + vec4(noise, noise, noise, 0);
}
#else
vec4 dither(vec4 color) {
    return color;
}
#endif //WR_FEATURE_DITHERING

vec4 sample_gradient(int address, float offset, float gradient_repeat) {
    // Modulo the offset if the gradient repeats.
    float x = mix(offset, fract(offset), gradient_repeat);

    // Calculate the color entry index to use for this offset:
    //     offsets < 0 use the first color entry, 0
    //     offsets from [0, 1) use the color entries in the range of [1, N-1)
    //     offsets >= 1 use the last color entry, N-1
    //     so transform the range [0, 1) -> [1, N-1)

    // TODO(gw): In the future we might consider making the size of the
    // LUT vary based on number / distribution of stops in the gradient.
    const int GRADIENT_ENTRIES = 128;
    x = 1.0 + x * float(GRADIENT_ENTRIES);
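    // For example, with 128 entries an offset of 0.5 (no repeat) maps to
    // x = 65.0, so lut_offset below is 130 and fract(x) is 0.0.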

    // Calculate the texel to index into the gradient color entries:
    //     floor(x) is the gradient color entry index
    //     fract(x) is the linear filtering factor between start and end
    int lut_offset = 2 * int(floor(x));     // There is a [start, end] color per entry.

    // Ensure we don't fetch outside the valid range of the LUT.
    lut_offset = clamp(lut_offset, 0, 2 * (GRADIENT_ENTRIES + 1));

    // Fetch the start and end color.
    vec4 texels[2] = fetch_from_gpu_cache_2(address + lut_offset);

    // Finally interpolate and apply dithering
    return dither(mix(texels[0], texels[1], fract(x)));
}

#endif //WR_FRAGMENT_SHADER