1 // Copyright 2018 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/viz/service/frame_sinks/video_capture/video_capture_overlay.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <utility>
10
11 #include "base/bind.h"
12 #include "base/numerics/safe_conversions.h"
13 #include "base/trace_event/trace_event.h"
14 #include "media/base/limits.h"
15 #include "media/base/video_frame.h"
16 #include "third_party/skia/include/core/SkBitmap.h"
17 #include "third_party/skia/include/core/SkFilterQuality.h"
18 #include "third_party/skia/include/core/SkImageInfo.h"
19 #include "ui/gfx/geometry/point.h"
20 #include "ui/gfx/geometry/rect_conversions.h"
21
22 using media::VideoFrame;
23 using media::VideoPixelFormat;
24
25 namespace viz {
26
// Out-of-line defaulted destructor for the FrameSource interface.
VideoCaptureOverlay::FrameSource::~FrameSource() = default;
28
VideoCaptureOverlay(FrameSource * frame_source,mojo::PendingReceiver<mojom::FrameSinkVideoCaptureOverlay> receiver)29 VideoCaptureOverlay::VideoCaptureOverlay(
30 FrameSource* frame_source,
31 mojo::PendingReceiver<mojom::FrameSinkVideoCaptureOverlay> receiver)
32 : frame_source_(frame_source), receiver_(this, std::move(receiver)) {
33 DCHECK(frame_source_);
34 receiver_.set_disconnect_handler(
35 base::BindOnce(&FrameSource::OnOverlayConnectionLost,
36 base::Unretained(frame_source_), this));
37 }
38
// Defaulted: all members release their resources via their own destructors.
VideoCaptureOverlay::~VideoCaptureOverlay() = default;
40
SetImageAndBounds(const SkBitmap & image,const gfx::RectF & bounds)41 void VideoCaptureOverlay::SetImageAndBounds(const SkBitmap& image,
42 const gfx::RectF& bounds) {
43 const gfx::Rect old_rect = ComputeSourceMutationRect();
44
45 image_ = image;
46 bounds_ = bounds;
47
48 // Reset the cached sprite since the source image has been changed.
49 sprite_ = nullptr;
50
51 const gfx::Rect new_rect = ComputeSourceMutationRect();
52 if (!new_rect.IsEmpty() || !old_rect.IsEmpty()) {
53 frame_source_->InvalidateRect(old_rect);
54 frame_source_->InvalidateRect(new_rect);
55 frame_source_->RequestRefreshFrame();
56 }
57 }
58
SetBounds(const gfx::RectF & bounds)59 void VideoCaptureOverlay::SetBounds(const gfx::RectF& bounds) {
60 if (bounds_ != bounds) {
61 const gfx::Rect old_rect = ComputeSourceMutationRect();
62 bounds_ = bounds;
63 const gfx::Rect new_rect = ComputeSourceMutationRect();
64 if (!new_rect.IsEmpty() || !old_rect.IsEmpty()) {
65 frame_source_->InvalidateRect(old_rect);
66 frame_source_->InvalidateRect(new_rect);
67 frame_source_->RequestRefreshFrame();
68 }
69 }
70 }
71
72 namespace {
73
74 // Scales a |relative| rect having coordinates in the range [0.0,1.0) by the
75 // given |span|, snapping all coordinates to even numbers.
ToAbsoluteBoundsForI420(const gfx::RectF & relative,const gfx::Rect & span)76 gfx::Rect ToAbsoluteBoundsForI420(const gfx::RectF& relative,
77 const gfx::Rect& span) {
78 const float absolute_left = std::fma(relative.x(), span.width(), span.x());
79 const float absolute_top = std::fma(relative.y(), span.height(), span.y());
80 const float absolute_right =
81 std::fma(relative.right(), span.width(), span.x());
82 const float absolute_bottom =
83 std::fma(relative.bottom(), span.height(), span.y());
84
85 // Compute the largest I420-friendly Rect that is fully-enclosed by the
86 // absolute rect. Use saturated_cast<> to restrict all extreme results [and
87 // Inf and NaN] to a safe range of integers.
88 const int snapped_left =
89 base::saturated_cast<int16_t>(std::ceil(absolute_left / 2.0f)) * 2;
90 const int snapped_top =
91 base::saturated_cast<int16_t>(std::ceil(absolute_top / 2.0f)) * 2;
92 const int snapped_right =
93 base::saturated_cast<int16_t>(std::floor(absolute_right / 2.0f)) * 2;
94 const int snapped_bottom =
95 base::saturated_cast<int16_t>(std::floor(absolute_bottom / 2.0f)) * 2;
96 return gfx::Rect(snapped_left, snapped_top,
97 std::max(0, snapped_right - snapped_left),
98 std::max(0, snapped_bottom - snapped_top));
99 }
100
101 // Shrinks the given |rect| by the minimum amount necessary to align its corners
102 // to even-numbered coordinates. |rect| is assumed to have non-negative values
103 // for its coordinates.
MinimallyShrinkRectForI420(const gfx::Rect & rect)104 gfx::Rect MinimallyShrinkRectForI420(const gfx::Rect& rect) {
105 DCHECK(gfx::Rect(0, 0, media::limits::kMaxDimension,
106 media::limits::kMaxDimension)
107 .Contains(rect));
108 const int left = rect.x() + (rect.x() % 2);
109 const int top = rect.y() + (rect.y() % 2);
110 const int right = rect.right() - (rect.right() % 2);
111 const int bottom = rect.bottom() - (rect.bottom() % 2);
112 return gfx::Rect(left, top, std::max(0, right - left),
113 std::max(0, bottom - top));
114 }
115
116 } // namespace
117
MakeRenderer(const gfx::Rect & region_in_frame,const VideoPixelFormat frame_format)118 VideoCaptureOverlay::OnceRenderer VideoCaptureOverlay::MakeRenderer(
119 const gfx::Rect& region_in_frame,
120 const VideoPixelFormat frame_format) {
121 // If there's no image set yet, punt.
122 if (image_.drawsNothing()) {
123 return VideoCaptureOverlay::OnceRenderer();
124 }
125
126 // Determine the bounds of the sprite to be blitted onto the video frame. The
127 // calculations here align to the 2x2 pixel-quads, since dealing with
128 // fractions or partial I420 chroma plane alpha-blending would greatly
129 // complexify the blitting algorithm later on. This introduces a little
130 // inaccuracy in the size and position of the overlay in the final result, but
131 // should be an acceptable trade-off for all use cases.
132 const gfx::Rect bounds_in_frame =
133 ToAbsoluteBoundsForI420(bounds_, region_in_frame);
134 // If the sprite's size will be unreasonably large, punt.
135 if (bounds_in_frame.width() > media::limits::kMaxDimension ||
136 bounds_in_frame.height() > media::limits::kMaxDimension) {
137 return VideoCaptureOverlay::OnceRenderer();
138 }
139
140 // Compute the blit rect: the region of the frame to be modified by future
141 // Sprite::Blit() calls. First, |region_in_frame| must be shrunk to have
142 // even-valued coordinates to ensure the final blit rect is I420-friendly.
143 // Then, the shrunk |region_in_frame| is used to clip |bounds_in_frame|.
144 gfx::Rect blit_rect = MinimallyShrinkRectForI420(region_in_frame);
145 blit_rect.Intersect(bounds_in_frame);
146 // If the two rects didn't intersect at all (i.e., everything has been
147 // clipped), punt.
148 if (blit_rect.IsEmpty()) {
149 return VideoCaptureOverlay::OnceRenderer();
150 }
151
152 // If the cached sprite does not match the computed scaled size and/or pixel
153 // format, create a new instance for this (and future) renderers.
154 if (!sprite_ || sprite_->size() != bounds_in_frame.size() ||
155 sprite_->format() != frame_format) {
156 sprite_ = base::MakeRefCounted<Sprite>(image_, bounds_in_frame.size(),
157 frame_format);
158 }
159
160 return base::BindOnce(&Sprite::Blit, sprite_, bounds_in_frame.origin(),
161 blit_rect);
162 }
163
164 // static
MakeCombinedRenderer(const std::vector<VideoCaptureOverlay * > & overlays,const gfx::Rect & region_in_frame,const VideoPixelFormat frame_format)165 VideoCaptureOverlay::OnceRenderer VideoCaptureOverlay::MakeCombinedRenderer(
166 const std::vector<VideoCaptureOverlay*>& overlays,
167 const gfx::Rect& region_in_frame,
168 const VideoPixelFormat frame_format) {
169 if (overlays.empty()) {
170 return VideoCaptureOverlay::OnceRenderer();
171 }
172
173 std::vector<OnceRenderer> renderers;
174 for (VideoCaptureOverlay* overlay : overlays) {
175 renderers.emplace_back(
176 overlay->MakeRenderer(region_in_frame, frame_format));
177 if (renderers.back().is_null()) {
178 renderers.pop_back();
179 }
180 }
181
182 if (renderers.empty()) {
183 return VideoCaptureOverlay::OnceRenderer();
184 }
185
186 return base::BindOnce(
187 [](std::vector<OnceRenderer> renderers, VideoFrame* frame) {
188 for (OnceRenderer& renderer : renderers) {
189 std::move(renderer).Run(frame);
190 }
191 },
192 std::move(renderers));
193 }
194
ComputeSourceMutationRect() const195 gfx::Rect VideoCaptureOverlay::ComputeSourceMutationRect() const {
196 if (!image_.drawsNothing() && !bounds_.IsEmpty()) {
197 const gfx::Size& source_size = frame_source_->GetSourceSize();
198 gfx::Rect result = gfx::ToEnclosingRect(
199 gfx::ScaleRect(bounds_, source_size.width(), source_size.height()));
200 result.Intersect(gfx::Rect(source_size));
201 return result;
202 }
203 return gfx::Rect();
204 }
205
// A Sprite caches the overlay |image| scaled to |size| and transformed into a
// layout suitable for blitting onto video frames of the given |format|.
VideoCaptureOverlay::Sprite::Sprite(const SkBitmap& image,
                                    const gfx::Size& size,
                                    const VideoPixelFormat format)
    : image_(image), size_(size), format_(format) {
  DCHECK(!image_.isNull());
}
212
VideoCaptureOverlay::Sprite::~Sprite() {
  // Destruction must occur on the same sequence that calls Blit().
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
216
217 namespace {
218
219 // Returns the pointer to the element at the |offset| position, given a pointer
220 // to the element for (0,0) in a row-major image plane.
221 template <typename Pointer>
PositionPointerInPlane(Pointer plane_begin,int stride,const gfx::Point & offset)222 Pointer PositionPointerInPlane(Pointer plane_begin,
223 int stride,
224 const gfx::Point& offset) {
225 return plane_begin + (offset.y() * stride) + offset.x();
226 }
227
228 // Returns the pointer to the element at the |offset| position, given a pointer
229 // to the element for (0,0) in a row-major bitmap with 4 elements per pixel.
230 template <typename Pointer>
PositionPointerARGB(Pointer pixels_begin,int stride,const gfx::Point & offset)231 Pointer PositionPointerARGB(Pointer pixels_begin,
232 int stride,
233 const gfx::Point& offset) {
234 return pixels_begin + (offset.y() * stride) + (4 * offset.x());
235 }
236
// Maps a byte value in [0,255] to the normalized floating-point range
// [0.0,1.0].
float From255(uint8_t value) {
  return static_cast<float>(value) / 255.0f;
}
242
243 // Transforms the value from the normalized floating-point [0.0,1.0] range to an
244 // unsigned int in the [0,255] range, capping any out-of-range values.
ToClamped255(float value)245 uint32_t ToClamped255(float value) {
246 value = std::fma(value, 255.0f, 0.5f /* rounding */);
247 return base::saturated_cast<uint8_t>(value);
248 }
249
250 } // namespace
251
// Alpha-blends ("SrcOver") this sprite onto |frame|. |position| is where the
// sprite's upper-left corner lands within the frame's visible rect, and
// |blit_rect| is the already-clipped region of the frame that will be
// modified. For I420 frames, all coordinates must be aligned to 2x2 quads.
void VideoCaptureOverlay::Sprite::Blit(const gfx::Point& position,
                                       const gfx::Rect& blit_rect,
                                       VideoFrame* frame) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(frame);
  DCHECK_EQ(format_, frame->format());
  DCHECK(frame->visible_rect().Contains(blit_rect));
  // A valid color space is required to (re-)generate |transformed_image_|.
  DCHECK(frame->ColorSpace().IsValid());

  TRACE_EVENT2("gpu.capture", "VideoCaptureOverlay::Sprite::Blit", "x",
               position.x(), "y", position.y());

  // Lazily (re-)transform the source image on first use, or whenever the
  // frame's color space differs from the one used last time.
  if (!transformed_image_ || color_space_ != frame->ColorSpace()) {
    color_space_ = frame->ColorSpace();
    TransformImage();
  }

  // Compute the left-most and top-most pixel to source from the transformed
  // image. This is usually (0,0) unless only part of the sprite is being
  // blitted (i.e., cropped at the edge(s) of the video frame).
  gfx::Point src_origin = blit_rect.origin() - position.OffsetFromOrigin();
  DCHECK(gfx::Rect(size_).Contains(gfx::Rect(src_origin, blit_rect.size())));

  // Blit the sprite (src) onto the video frame (dest). One of two algorithms is
  // used, depending on the video frame's format, as the blending calculations
  // and data layout/format are different.
  switch (frame->format()) {
    case media::PIXEL_FORMAT_I420: {
      // Core assumption: All coordinates are aligned to even-numbered
      // coordinates.
      DCHECK_EQ(src_origin.x() % 2, 0);
      DCHECK_EQ(src_origin.y() % 2, 0);
      DCHECK_EQ(blit_rect.x() % 2, 0);
      DCHECK_EQ(blit_rect.y() % 2, 0);
      DCHECK_EQ(blit_rect.width() % 2, 0);
      DCHECK_EQ(blit_rect.height() % 2, 0);

      // Helper function to execute a "SrcOver" blit from |src| to |dst|, and
      // store the results back in |dst|. Per element:
      //   dst = src + dst * under_weight
      // where |src| is pre-multiplied and |under_weight| is "one minus src
      // alpha".
      const auto BlitOntoPlane = [](const gfx::Size& blit_size, int src_stride,
                                    const float* src, const float* under_weight,
                                    int dst_stride, uint8_t* dst) {
        for (int row = 0; row < blit_size.height(); ++row, src += src_stride,
                 under_weight += src_stride, dst += dst_stride) {
          for (int col = 0; col < blit_size.width(); ++col) {
            dst[col] = ToClamped255(
                std::fma(From255(dst[col]), under_weight[col], src[col]));
          }
        }
      };

      // TransformImage() lays out the I420 data as five consecutive planes in
      // |transformed_image_|: "one minus alpha", luma, subsampled "one minus
      // alpha", U, then V.
      //
      // Blit the Y plane: |src| points to the pre-multiplied luma values, while
      // |under_weight| points to the "one minus src alpha" values. Both have
      // the same stride, |src_stride|.
      int src_stride = size_.width();
      const float* under_weight = PositionPointerInPlane(
          transformed_image_.get(), src_stride, src_origin);
      const int num_pixels = size_.GetArea();
      const float* src = under_weight + num_pixels;
      // Likewise, start |dst| at the upper-left-most pixel within the video
      // frame's Y plane that will be SrcOver'ed.
      int dst_stride = frame->stride(VideoFrame::kYPlane);
      uint8_t* dst =
          PositionPointerInPlane(frame->visible_data(VideoFrame::kYPlane),
                                 dst_stride, blit_rect.origin());
      BlitOntoPlane(blit_rect.size(), src_stride, src, under_weight, dst_stride,
                    dst);

      // Blit the U and V planes similarly to the Y plane, but reduce all
      // coordinates by 2x2.
      src_stride = size_.width() / 2;
      src_origin = gfx::Point(src_origin.x() / 2, src_origin.y() / 2);
      under_weight = PositionPointerInPlane(
          transformed_image_.get() + 2 * num_pixels, src_stride, src_origin);
      const int num_chroma_pixels = size_.GetArea() / 4;
      src = under_weight + num_chroma_pixels;
      dst_stride = frame->stride(VideoFrame::kUPlane);
      const gfx::Rect chroma_blit_rect(blit_rect.x() / 2, blit_rect.y() / 2,
                                       blit_rect.width() / 2,
                                       blit_rect.height() / 2);
      dst = PositionPointerInPlane(frame->visible_data(VideoFrame::kUPlane),
                                   dst_stride, chroma_blit_rect.origin());
      BlitOntoPlane(chroma_blit_rect.size(), src_stride, src, under_weight,
                    dst_stride, dst);
      // The V plane immediately follows the U plane in |transformed_image_|,
      // and the same subsampled |under_weight| applies to both.
      src += num_chroma_pixels;
      dst_stride = frame->stride(VideoFrame::kVPlane);
      dst = PositionPointerInPlane(frame->visible_data(VideoFrame::kVPlane),
                                   dst_stride, chroma_blit_rect.origin());
      BlitOntoPlane(chroma_blit_rect.size(), src_stride, src, under_weight,
                    dst_stride, dst);

      break;
    }

    case media::PIXEL_FORMAT_ARGB: {
      // TransformImage() stores interleaved float pixels in the order: alpha,
      // then alpha-pre-multiplied red, green, blue.
      //
      // Start |src| at the upper-left-most pixel within |transformed_image_|
      // that will be blitted.
      const int src_stride = size_.width() * 4;
      const float* src =
          PositionPointerARGB(transformed_image_.get(), src_stride, src_origin);

      // Likewise, start |dst| at the upper-left-most pixel within the video
      // frame that will be SrcOver'ed.
      const int dst_stride = frame->stride(VideoFrame::kARGBPlane);
      DCHECK_EQ(dst_stride % sizeof(uint32_t), 0u);
      uint8_t* dst =
          PositionPointerARGB(frame->visible_data(VideoFrame::kARGBPlane),
                              dst_stride, blit_rect.origin());
      DCHECK_EQ((dst - frame->visible_data(VideoFrame::kARGBPlane)) %
                    sizeof(uint32_t),
                0u);

      // Blend each sprite pixel over the corresponding pixel in the video
      // frame, and store the result back in the video frame. Note that the
      // video frame format does NOT have color values pre-multiplied by the
      // alpha, so each blended color is divided by the output alpha before
      // being stored.
      for (int row = 0; row < blit_rect.height();
           ++row, src += src_stride, dst += dst_stride) {
        uint32_t* dst_pixel = reinterpret_cast<uint32_t*>(dst);
        for (int col = 0; col < blit_rect.width(); ++col) {
          const int src_idx = 4 * col;
          const float src_alpha = src[src_idx];
          // |dst_weight| is the destination's alpha scaled by "one minus src
          // alpha" — the standard SrcOver coverage of the underlying pixel.
          const float dst_weight =
              From255(dst_pixel[col] >> 24) * (1.0f - src_alpha);
          const float out_alpha = src_alpha + dst_weight;
          float out_red = std::fma(From255(dst_pixel[col] >> 16), dst_weight,
                                   src[src_idx + 1]);
          float out_green = std::fma(From255(dst_pixel[col] >> 8), dst_weight,
                                     src[src_idx + 2]);
          float out_blue = std::fma(From255(dst_pixel[col] >> 0), dst_weight,
                                    src[src_idx + 3]);
          // Un-pre-multiply the color channels (skipped when fully
          // transparent, in which case the channels are already zero).
          if (out_alpha != 0.0f) {
            out_red /= out_alpha;
            out_green /= out_alpha;
            out_blue /= out_alpha;
          }
          dst_pixel[col] =
              ((ToClamped255(out_alpha) << 24) | (ToClamped255(out_red) << 16) |
               (ToClamped255(out_green) << 8) | (ToClamped255(out_blue) << 0));
        }
      }

      break;
    }

    default:
      NOTREACHED();
      break;
  }
}
402
// Regenerates |transformed_image_| from |image_|: scales to |size_|, converts
// from the image's color space into |color_space_|, pre-multiplies by alpha,
// and stores the result as planar floats (for I420) or interleaved floats
// (for ARGB), depending on |format_|.
void VideoCaptureOverlay::Sprite::TransformImage() {
  TRACE_EVENT2("gpu.capture", "VideoCaptureOverlay::Sprite::TransformImage",
               "width", size_.width(), "height", size_.height());

  // Scale the source |image_| to match the format and size required. For the
  // purposes of color space conversion, the alpha must not be pre-multiplied.
  const SkImageInfo scaled_image_format =
      SkImageInfo::Make(size_.width(), size_.height(), kN32_SkColorType,
                        kUnpremul_SkAlphaType, image_.refColorSpace());
  SkBitmap scaled_image;
  if (image_.info() == scaled_image_format) {
    // Already in the required size/format: use the image directly.
    scaled_image = image_;
  } else {
    if (scaled_image.tryAllocPixels(scaled_image_format) &&
        image_.pixmap().scalePixels(scaled_image.pixmap(),
                                    kMedium_SkFilterQuality)) {
      // Cache the scaled image, to avoid needing to re-scale in future calls to
      // this method.
      image_ = scaled_image;
    } else {
      // If the allocation, format conversion and/or scaling failed, just reset
      // the |scaled_image|. This will be checked below.
      scaled_image.reset();
    }
  }

  // Populate |colors| and |alphas| from the |scaled_image|. If the image
  // scaling operation failed, this sprite should draw nothing, and so fully
  // transparent pixels will be generated instead.
  const int num_pixels = size_.GetArea();
  std::unique_ptr<float[]> alphas(new float[num_pixels]);
  std::unique_ptr<gfx::ColorTransform::TriStim[]> colors(
      new gfx::ColorTransform::TriStim[num_pixels]);
  if (scaled_image.drawsNothing()) {
    std::fill(alphas.get(), alphas.get() + num_pixels, 0.0f);
    std::fill(colors.get(), colors.get() + num_pixels,
              gfx::ColorTransform::TriStim());
  } else {
    // Unpack each 32-bit pixel into a normalized alpha plus an RGB TriStim.
    int pos = 0;
    for (int y = 0; y < size_.height(); ++y) {
      const uint32_t* src = scaled_image.getAddr32(0, y);
      for (int x = 0; x < size_.width(); ++x) {
        const uint32_t pixel = src[x];
        alphas[pos] = ((pixel >> SK_A32_SHIFT) & 0xff) / 255.0f;
        colors[pos].SetPoint(((pixel >> SK_R32_SHIFT) & 0xff) / 255.0f,
                             ((pixel >> SK_G32_SHIFT) & 0xff) / 255.0f,
                             ((pixel >> SK_B32_SHIFT) & 0xff) / 255.0f);
        ++pos;
      }
    }
  }

  // Transform the colors, if needed. This may perform RGB→YUV conversion.
  gfx::ColorSpace image_color_space;
  if (scaled_image.colorSpace()) {
    image_color_space = gfx::ColorSpace(*scaled_image.colorSpace());
  }
  if (!image_color_space.IsValid()) {
    // Assume a default linear color space, if no color space was provided.
    image_color_space = gfx::ColorSpace(
        gfx::ColorSpace::PrimaryID::BT709, gfx::ColorSpace::TransferID::LINEAR,
        gfx::ColorSpace::MatrixID::RGB, gfx::ColorSpace::RangeID::FULL);
  }
  if (image_color_space != color_space_) {
    const auto color_transform = gfx::ColorTransform::NewColorTransform(
        image_color_space, color_space_,
        gfx::ColorTransform::Intent::INTENT_ABSOLUTE);
    color_transform->Transform(colors.get(), num_pixels);
  }

  switch (format_) {
    case media::PIXEL_FORMAT_I420: {
      // Produce 5 planes of data: The "one minus alpha" plane, the Y plane, the
      // subsampled "one minus alpha" plane, the U plane, and the V plane.
      // Pre-multiply the colors by the alpha to prevent extra work in multiple
      // later Blit() calls.
      DCHECK_EQ(size_.width() % 2, 0);
      DCHECK_EQ(size_.height() % 2, 0);
      const int num_chroma_pixels = size_.GetArea() / 4;
      transformed_image_.reset(
          new float[num_pixels * 2 + num_chroma_pixels * 3]);

      // Copy the alpha values, and pre-multiply the luma values by the alpha.
      float* out_1_minus_alpha = transformed_image_.get();
      float* out_luma = out_1_minus_alpha + num_pixels;
      for (int i = 0; i < num_pixels; ++i) {
        const float alpha = alphas[i];
        out_1_minus_alpha[i] = 1.0f - alpha;
        out_luma[i] = colors[i].x() * alpha;
      }

      // Downscale the alpha, U, and V planes by 2x2, and pre-multiply the
      // chroma values by the alpha. Each iteration of the outer loop consumes
      // two rows of |alphas|/|colors| and emits one subsampled row.
      float* out_uv_1_minus_alpha = out_luma + num_pixels;
      float* out_u = out_uv_1_minus_alpha + num_chroma_pixels;
      float* out_v = out_u + num_chroma_pixels;
      const float* alpha_row0 = alphas.get();
      const float* const alpha_row_end = alpha_row0 + num_pixels;
      const gfx::ColorTransform::TriStim* color_row0 = colors.get();
      while (alpha_row0 < alpha_row_end) {
        const float* alpha_row1 = alpha_row0 + size_.width();
        const gfx::ColorTransform::TriStim* color_row1 =
            color_row0 + size_.width();
        for (int col = 0; col < size_.width(); col += 2) {
          // First, the downscaled alpha is the average of the four original
          // alpha values:
          //
          //   sum_of_alphas = a[r,c] + a[r,c+1] + a[r+1,c] + a[r+1,c+1];
          //   average_alpha = sum_of_alphas / 4
          //
          // (Expressed below as fma(sum_of_alphas, -1/4, 1), which computes
          // "one minus average_alpha" directly.)
          *(out_uv_1_minus_alpha++) =
              std::fma(alpha_row0[col] + alpha_row0[col + 1] + alpha_row1[col] +
                           alpha_row1[col + 1],
                       -1.0f / 4.0f, 1.0f);
          // Then, the downscaled chroma values are the weighted average of the
          // four original chroma values (weighed by alpha):
          //
          //   weighted_sum_of_chromas =
          //       c[r,c]*a[r,c] + c[r,c+1]*a[r,c+1] +
          //           c[r+1,c]*a[r+1,c] + c[r+1,c+1]*a[r+1,c+1]
          //   sum_of_weights = sum_of_alphas;
          //   average_chroma = weighted_sum_of_chromas / sum_of_weights
          //
          // But then, because the chroma is to be pre-multiplied by the alpha,
          // the calculations simplify, as follows:
          //
          //   premul_chroma = average_chroma * average_alpha
          //                 = (weighted_sum_of_chromas / sum_of_alphas) *
          //                       (sum_of_alphas / 4)
          //                 = weighted_sum_of_chromas / 4
          //
          // This also automatically solves a special case, when sum_of_alphas
          // is zero: With the simplified calculations, there is no longer a
          // "divide-by-zero guard" needed; and the result in this case will be
          // a zero chroma, which is perfectly acceptable behavior.
          *(out_u++) = ((color_row0[col].y() * alpha_row0[col]) +
                        (color_row0[col + 1].y() * alpha_row0[col + 1]) +
                        (color_row1[col].y() * alpha_row1[col]) +
                        (color_row1[col + 1].y() * alpha_row1[col + 1])) /
                       4.0f;
          *(out_v++) = ((color_row0[col].z() * alpha_row0[col]) +
                        (color_row0[col + 1].z() * alpha_row0[col + 1]) +
                        (color_row1[col].z() * alpha_row1[col]) +
                        (color_row1[col + 1].z() * alpha_row1[col + 1])) /
                       4.0f;
        }
        // Advance to the next 2-row band.
        alpha_row0 = alpha_row1 + size_.width();
        color_row0 = color_row1 + size_.width();
      }

      break;
    }

    case media::PIXEL_FORMAT_ARGB: {
      // Produce ARGB pixels from |colors| and |alphas|. Pre-multiply the colors
      // by the alpha to prevent extra work in multiple later Blit() calls.
      transformed_image_.reset(new float[num_pixels * 4]);
      float* out = transformed_image_.get();
      for (int i = 0; i < num_pixels; ++i) {
        const float alpha = alphas[i];
        *(out++) = alpha;
        *(out++) = colors[i].x() * alpha;
        *(out++) = colors[i].y() * alpha;
        *(out++) = colors[i].z() * alpha;
      }
      break;
    }

    default:
      NOTREACHED();
      break;
  }
}
575
576 } // namespace viz
577