1 /*
2 * Copyright 2006 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkMaskFilter.h"
9 #include "include/core/SkRRect.h"
10 #include "include/core/SkStrokeRec.h"
11 #include "include/core/SkVertices.h"
12 #include "src/core/SkBlurMask.h"
13 #include "src/core/SkBlurPriv.h"
14 #include "src/core/SkGpuBlurUtils.h"
15 #include "src/core/SkMaskFilterBase.h"
16 #include "src/core/SkRRectPriv.h"
17 #include "src/core/SkReadBuffer.h"
18 #include "src/core/SkStringUtils.h"
19 #include "src/core/SkWriteBuffer.h"
20
21 #if SK_SUPPORT_GPU
22 #include "include/private/GrRecordingContext.h"
23 #include "src/gpu/GrClip.h"
24 #include "src/gpu/GrFragmentProcessor.h"
25 #include "src/gpu/GrRecordingContextPriv.h"
26 #include "src/gpu/GrRenderTargetContext.h"
27 #include "src/gpu/GrResourceProvider.h"
28 #include "src/gpu/GrShaderCaps.h"
29 #include "src/gpu/GrStyle.h"
30 #include "src/gpu/GrTextureProxy.h"
31 #include "src/gpu/effects/GrTextureDomain.h"
32 #include "src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h"
33 #include "src/gpu/effects/generated/GrRRectBlurEffect.h"
34 #include "src/gpu/effects/generated/GrRectBlurEffect.h"
35 #include "src/gpu/effects/generated/GrSimpleTextureEffect.h"
36 #include "src/gpu/geometry/GrShape.h"
37 #include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
38 #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
39 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
40 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
41 #endif
42
/*
 * SkMaskFilterBase implementation that blurs an A8 mask with the given sigma
 * and SkBlurStyle.  When 'respectCTM' is true the sigma is mapped through the
 * CTM before blurring (see computeXformedSigma below).
 */
class SkBlurMaskFilterImpl : public SkMaskFilterBase {
public:
    SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, bool respectCTM);

    // overrides from SkMaskFilter
    SkMask::Format getFormat() const override;
    bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
                    SkIPoint* margin) const override;

#if SK_SUPPORT_GPU
    // GPU fast paths: decide whether/where a mask can be filtered on the GPU,
    // draw simple blurred shapes directly, and blur an existing mask texture.
    bool canFilterMaskGPU(const GrShape& shape,
                          const SkIRect& devSpaceShapeBounds,
                          const SkIRect& clipBounds,
                          const SkMatrix& ctm,
                          SkIRect* maskRect) const override;
    bool directFilterMaskGPU(GrRecordingContext*,
                             GrRenderTargetContext* renderTargetContext,
                             GrPaint&&,
                             const GrClip&,
                             const SkMatrix& viewMatrix,
                             const GrShape& shape) const override;
    sk_sp<GrTextureProxy> filterMaskGPU(GrRecordingContext*,
                                        sk_sp<GrTextureProxy> srcProxy,
                                        GrColorType srcColorType,
                                        SkAlphaType srcAlphaType,
                                        const SkMatrix& ctm,
                                        const SkIRect& maskRect) const override;
#endif

    void computeFastBounds(const SkRect&, SkRect*) const override;
    bool asABlur(BlurRec*) const override;


protected:
    // Nine-patch fast paths: blur a small representative mask once and let the
    // caller stretch its center row/column over the full geometry.
    FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    // Analytic blurs of rect/rrect geometry (no general convolution).
    bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
                        SkIPoint* margin, SkMask::CreateMode createMode) const;
    bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
                         SkIPoint* margin, SkMask::CreateMode createMode) const;

    // True when the blur sigma should NOT be scaled by the CTM.
    bool ignoreXform() const { return !fRespectCTM; }

private:
    SK_FLATTENABLE_HOOKS(SkBlurMaskFilterImpl)
    // To avoid unseemly allocation requests (esp. for finite platforms like
    // handset) we limit the radius so something manageable. (as opposed to
    // a request like 10,000)
    static const SkScalar kMAX_BLUR_SIGMA;

    SkScalar    fSigma;       // requested blur sigma (pre-CTM)
    SkBlurStyle fBlurStyle;   // which SkBlurStyle look to produce
    bool        fRespectCTM;  // if true, sigma is mapped through the CTM

    SkBlurMaskFilterImpl(SkReadBuffer&);
    void flatten(SkWriteBuffer&) const override;

    // Device-space sigma: mapped through 'ctm' unless the transform is
    // ignored, then clamped to kMAX_BLUR_SIGMA.
    SkScalar computeXformedSigma(const SkMatrix& ctm) const {
        SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
        return SkMinScalar(xformedSigma, kMAX_BLUR_SIGMA);
    }

    friend class SkBlurMaskFilter;

    typedef SkMaskFilter INHERITED;
    friend void sk_register_blur_maskfilter_createproc();
};
116
// Out-of-line definition of the blur-sigma cap declared in SkBlurMaskFilterImpl.
const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
118
119 // linearly interpolate between y1 & y3 to match x2's position between x1 & x3
interp(SkScalar x1,SkScalar x2,SkScalar x3,SkScalar y1,SkScalar y3)120 static SkScalar interp(SkScalar x1, SkScalar x2, SkScalar x3, SkScalar y1, SkScalar y3) {
121 SkASSERT(x1 <= x2 && x2 <= x3);
122 SkASSERT(y1 <= y3);
123
124 SkScalar t = (x2 - x1) / (x3 - x1);
125 return y1 + t * (y3 - y1);
126 }
127
128 // Insert 'lower' and 'higher' into 'array1' and insert a new value at each matching insertion
129 // point in 'array2' that linearly interpolates between the existing values.
130 // Return a bit mask which contains a copy of 'inputMask' for all the cells between the two
131 // insertion points.
insert_into_arrays(SkScalar * array1,SkScalar * array2,SkScalar lower,SkScalar higher,int * num,uint32_t inputMask,int maskSize)132 static uint32_t insert_into_arrays(SkScalar* array1, SkScalar* array2,
133 SkScalar lower, SkScalar higher,
134 int* num, uint32_t inputMask, int maskSize) {
135 SkASSERT(lower < higher);
136 SkASSERT(lower >= array1[0] && higher <= array1[*num-1]);
137
138 int32_t skipMask = 0x0;
139 int i;
140 for (i = 0; i < *num; ++i) {
141 if (lower >= array1[i] && lower < array1[i+1]) {
142 if (!SkScalarNearlyEqual(lower, array1[i])) {
143 memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
144 array1[i+1] = lower;
145 memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
146 array2[i+1] = interp(array1[i], lower, array1[i+2], array2[i], array2[i+2]);
147 i++;
148 (*num)++;
149 }
150 break;
151 }
152 }
153 for ( ; i < *num; ++i) {
154 skipMask |= inputMask << (i*maskSize);
155 if (higher > array1[i] && higher <= array1[i+1]) {
156 if (!SkScalarNearlyEqual(higher, array1[i+1])) {
157 memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
158 array1[i+1] = higher;
159 memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
160 array2[i+1] = interp(array1[i], higher, array1[i+2], array2[i], array2[i+2]);
161 (*num)++;
162 }
163 break;
164 }
165 }
166
167 return skipMask;
168 }
169
// Computes the geometry for drawing a blurred round rect as a nine-patch:
//  - 'rrectToDraw'/'widthHeight': a minimal round rect whose blurred image can
//    be stretched over the full result, and the texture size that holds it.
//  - 'rectXs'/'rectYs' and 'texXs'/'texYs': matching dst-space and
//    texture-space division lines for the stretched draw ('numXs'/'numYs'
//    entries each).
//  - 'skipMask': one bit per grid cell, set for cells covered by 'occluder'
//    (so they can be skipped when drawing).
// Returns false when the rrect is too small, relative to the blur, for the
// nine-patch decomposition to be valid.
bool SkComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
                                 const SkRect& occluder,
                                 SkScalar sigma, SkScalar xformedSigma,
                                 SkRRect* rrectToDraw,
                                 SkISize* widthHeight,
                                 SkScalar rectXs[kSkBlurRRectMaxDivisions],
                                 SkScalar rectYs[kSkBlurRRectMaxDivisions],
                                 SkScalar texXs[kSkBlurRRectMaxDivisions],
                                 SkScalar texYs[kSkBlurRRectMaxDivisions],
                                 int* numXs, int* numYs, uint32_t* skipMask) {
    // Blur padding in device space (~3 * device sigma) and source space.
    unsigned int devBlurRadius = 3*SkScalarCeilToInt(xformedSigma-1/6.0f);
    SkScalar srcBlurRadius = 3.0f * sigma;

    const SkRect& devOrig = devRRect.getBounds();
    const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);

    // Largest corner radius on each side, rounded up, in device space.
    const int devLeft  = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
    const int devTop   = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
    const int devRight = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
    const int devBot   = SkScalarCeilToInt(SkTMax<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));

    // This is a conservative check for nine-patchability: the unstretched
    // corner regions (radius + blur) must not overlap.
    if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
        devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
        return false;
    }

    const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);

    // Same per-side maxima in source space (kept fractional).
    const SkScalar srcLeft  = SkTMax<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
    const SkScalar srcTop   = SkTMax<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
    const SkScalar srcRight = SkTMax<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
    const SkScalar srcBot   = SkTMax<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);

    // Minimal rrect: blur padding on both sides of each corner span plus one
    // stretchable center pixel; the texture adds blur padding around that.
    int newRRWidth = 2*devBlurRadius + devLeft + devRight + 1;
    int newRRHeight = 2*devBlurRadius + devTop + devBot + 1;
    widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
    widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;

    const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);

    // Dst-space division lines (x then y): outer edge, end of the left/top
    // unstretched region, start of the right/bottom unstretched region, edge.
    rectXs[0] = srcProxyRect.fLeft;
    rectXs[1] = srcProxyRect.fLeft + 2*srcBlurRadius + srcLeft;
    rectXs[2] = srcProxyRect.fRight - 2*srcBlurRadius - srcRight;
    rectXs[3] = srcProxyRect.fRight;

    rectYs[0] = srcProxyRect.fTop;
    rectYs[1] = srcProxyRect.fTop + 2*srcBlurRadius + srcTop;
    rectYs[2] = srcProxyRect.fBottom - 2*srcBlurRadius - srcBot;
    rectYs[3] = srcProxyRect.fBottom;

    // Matching texture-space division lines; the middle pair brackets the
    // single stretchable texel.
    texXs[0] = 0.0f;
    texXs[1] = 2.0f*devBlurRadius + devLeft;
    texXs[2] = 2.0f*devBlurRadius + devLeft + 1;
    texXs[3] = SkIntToScalar(widthHeight->fWidth);

    texYs[0] = 0.0f;
    texYs[1] = 2.0f*devBlurRadius + devTop;
    texYs[2] = 2.0f*devBlurRadius + devTop + 1;
    texYs[3] = SkIntToScalar(widthHeight->fHeight);

    SkRect temp = occluder;

    *numXs = 4;
    *numYs = 4;
    *skipMask = 0;
    // If the occluder overlaps the blurred bounds, add division lines at its
    // edges and mark the fully occluded cells in skipMask.
    if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
        *skipMask = insert_into_arrays(rectXs, texXs, temp.fLeft, temp.fRight, numXs, 0x1, 1);
        *skipMask = insert_into_arrays(rectYs, texYs, temp.fTop, temp.fBottom,
                                       numYs, *skipMask, *numXs-1);
    }

    // The minimal rrect, positioned inside the texture's blur padding, with
    // radii rounded up to match the integral device-space corner spans.
    const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(newRRWidth),
                                            SkIntToScalar(newRRHeight));
    SkVector newRadii[4];
    newRadii[0] = { SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY) };
    newRadii[1] = { SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY) };
    newRadii[2] = { SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY) };
    newRadii[3] = { SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY) };

    rrectToDraw->setRectRadii(newRect, newRadii);
    return true;
}
261
262 ///////////////////////////////////////////////////////////////////////////////
263
SkBlurMaskFilterImpl(SkScalar sigma,SkBlurStyle style,bool respectCTM)264 SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style, bool respectCTM)
265 : fSigma(sigma)
266 , fBlurStyle(style)
267 , fRespectCTM(respectCTM) {
268 SkASSERT(fSigma > 0);
269 SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
270 }
271
// Blur masks are always produced in 8-bit alpha format.
SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
    return SkMask::kA8_Format;
}
275
asABlur(BlurRec * rec) const276 bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
277 if (this->ignoreXform()) {
278 return false;
279 }
280
281 if (rec) {
282 rec->fSigma = fSigma;
283 rec->fStyle = fBlurStyle;
284 }
285 return true;
286 }
287
filterMask(SkMask * dst,const SkMask & src,const SkMatrix & matrix,SkIPoint * margin) const288 bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
289 const SkMatrix& matrix,
290 SkIPoint* margin) const {
291 SkScalar sigma = this->computeXformedSigma(matrix);
292 return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, margin);
293 }
294
filterRectMask(SkMask * dst,const SkRect & r,const SkMatrix & matrix,SkIPoint * margin,SkMask::CreateMode createMode) const295 bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
296 const SkMatrix& matrix,
297 SkIPoint* margin, SkMask::CreateMode createMode) const {
298 SkScalar sigma = computeXformedSigma(matrix);
299
300 return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
301 }
302
filterRRectMask(SkMask * dst,const SkRRect & r,const SkMatrix & matrix,SkIPoint * margin,SkMask::CreateMode createMode) const303 bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
304 const SkMatrix& matrix,
305 SkIPoint* margin, SkMask::CreateMode createMode) const {
306 SkScalar sigma = computeXformedSigma(matrix);
307
308 return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
309 }
310
311 #include "include/core/SkCanvas.h"
312
prepare_to_draw_into_mask(const SkRect & bounds,SkMask * mask)313 static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
314 SkASSERT(mask != nullptr);
315
316 mask->fBounds = bounds.roundOut();
317 mask->fRowBytes = SkAlign4(mask->fBounds.width());
318 mask->fFormat = SkMask::kA8_Format;
319 const size_t size = mask->computeImageSize();
320 mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
321 if (nullptr == mask->fImage) {
322 return false;
323 }
324 return true;
325 }
326
draw_rrect_into_mask(const SkRRect rrect,SkMask * mask)327 static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
328 if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
329 return false;
330 }
331
332 // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
333 // clean way to share more code?
334 SkBitmap bitmap;
335 bitmap.installMaskPixels(*mask);
336
337 SkCanvas canvas(bitmap);
338 canvas.translate(-SkIntToScalar(mask->fBounds.left()),
339 -SkIntToScalar(mask->fBounds.top()));
340
341 SkPaint paint;
342 paint.setAntiAlias(true);
343 canvas.drawRRect(rrect, paint);
344 return true;
345 }
346
draw_rects_into_mask(const SkRect rects[],int count,SkMask * mask)347 static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
348 if (!prepare_to_draw_into_mask(rects[0], mask)) {
349 return false;
350 }
351
352 SkBitmap bitmap;
353 bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
354 mask->fBounds.height(),
355 kAlpha_8_SkColorType,
356 kPremul_SkAlphaType),
357 mask->fImage, mask->fRowBytes);
358
359 SkCanvas canvas(bitmap);
360 canvas.translate(-SkIntToScalar(mask->fBounds.left()),
361 -SkIntToScalar(mask->fBounds.top()));
362
363 SkPaint paint;
364 paint.setAntiAlias(true);
365
366 if (1 == count) {
367 canvas.drawRect(rects[0], paint);
368 } else {
369 // todo: do I need a fast way to do this?
370 SkPath path;
371 path.addRect(rects[0]);
372 path.addRect(rects[1]);
373 path.setFillType(SkPath::kEvenOdd_FillType);
374 canvas.drawPath(path, paint);
375 }
376 return true;
377 }
378
rect_exceeds(const SkRect & r,SkScalar v)379 static bool rect_exceeds(const SkRect& r, SkScalar v) {
380 return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
381 r.width() > v || r.height() > v;
382 }
383
384 #include "src/core/SkMaskCache.h"
385
copy_mask_to_cacheddata(SkMask * mask)386 static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
387 const size_t size = mask->computeTotalImageSize();
388 SkCachedData* data = SkResourceCache::NewCachedData(size);
389 if (data) {
390 memcpy(data->writable_data(), mask->fImage, size);
391 SkMask::FreeImage(mask->fImage);
392 mask->fImage = (uint8_t*)data->data();
393 }
394 return data;
395 }
396
// Looks up a previously blurred rrect mask in the global mask cache; on a hit,
// fills in 'mask' and returns the cache entry (ref'ed by FindAndRef), else nullptr.
static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       const SkRRect& rrect) {
    return SkMaskCache::FindAndRef(sigma, style, rrect, mask);
}
401
add_cached_rrect(SkMask * mask,SkScalar sigma,SkBlurStyle style,const SkRRect & rrect)402 static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
403 const SkRRect& rrect) {
404 SkCachedData* cache = copy_mask_to_cacheddata(mask);
405 if (cache) {
406 SkMaskCache::Add(sigma, style, rrect, *mask, cache);
407 }
408 return cache;
409 }
410
// Looks up a previously blurred rect-set mask in the global mask cache; on a hit,
// fills in 'mask' and returns the cache entry (ref'ed by FindAndRef), else nullptr.
static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       const SkRect rects[], int count) {
    return SkMaskCache::FindAndRef(sigma, style, rects, count, mask);
}
415
add_cached_rects(SkMask * mask,SkScalar sigma,SkBlurStyle style,const SkRect rects[],int count)416 static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
417 const SkRect rects[], int count) {
418 SkCachedData* cache = copy_mask_to_cacheddata(mask);
419 if (cache) {
420 SkMaskCache::Add(sigma, style, rects, count, *mask, cache);
421 }
422 return cache;
423 }
424
// Use the faster analytic blur approach for round rects (see filterRRectToNine below).
static const bool c_analyticBlurRRect{true};
426
// Nine-patch fast path for blurred round rects: blur a minimal "small" round
// rect once (or fetch it from the mask cache) and report a center point so the
// caller can stretch the blurred mask's middle row/column over the full rect.
// Returns kUnimplemented_FilterReturn to fall back to the general path.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    SkASSERT(patch != nullptr);
    switch (rrect.getType()) {
        case SkRRect::kEmpty_Type:
            // Nothing to draw.
            return kFalse_FilterReturn;

        case SkRRect::kRect_Type:
            // We should have caught this earlier.
            SkASSERT(false);
            // Fall through.
        case SkRRect::kOval_Type:
            // The nine patch special case does not handle ovals, and we
            // already have code for rectangles.
            return kUnimplemented_FilterReturn;

        // These three can take advantage of this fast path.
        case SkRRect::kSimple_Type:
        case SkRRect::kNinePatch_Type:
        case SkRRect::kComplex_Type:
            break;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rrect.rect().roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    // First pass: compute only the blurred bounds/margin, not the pixels.
    bool filterResult = false;
    if (c_analyticBlurRRect) {
        // special case for fast round rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
                                             SkMask::kJustComputeBounds_CreateMode);
    }

    if (!filterResult) {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    // Now figure out the appropriate width and height of the smaller round rectangle
    // to stretch. It will take into account the larger radius per side as well as double
    // the margin, to account for inner and outer blur.
    const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);

    const SkScalar leftUnstretched = SkTMax(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
    const SkScalar rightUnstretched = SkTMax(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);

    // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
    // any fractional space on either side plus 1 for the part to stretch.
    const SkScalar stretchSize = SkIntToScalar(3);

    const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
    if (totalSmallWidth >= rrect.rect().width()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    const SkScalar topUnstretched = SkTMax(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
    const SkScalar bottomUnstretched = SkTMax(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);

    const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
    if (totalSmallHeight >= rrect.rect().height()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    // Build the minimal round rect that preserves all four corner radii.
    SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);

    SkRRect smallRR;
    SkVector radii[4];
    radii[SkRRect::kUpperLeft_Corner] = UL;
    radii[SkRRect::kUpperRight_Corner] = UR;
    radii[SkRRect::kLowerRight_Corner] = LR;
    radii[SkRRect::kLowerLeft_Corner] = LL;
    smallRR.setRectRadii(smallR, radii);

    // Second pass: produce (or find in the cache) the blurred small mask.
    const SkScalar sigma = this->computeXformedSigma(matrix);
    SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
    if (!cache) {
        bool analyticBlurWorked = false;
        if (c_analyticBlurRRect) {
            analyticBlurWorked =
                this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode);
        }

        if (!analyticBlurWorked) {
            // Fallback: rasterize the small rrect and run the generic blur.
            if (!draw_rrect_into_mask(smallRR, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
    }

    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
    patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}
559
// Use the faster analytic blur approach for ninepatch rects (see filterRectsToNine below).
static const bool c_analyticBlurNinepatch{true};
562
// Nine-patch fast path for one blurred rect (count == 1) or the region
// between two nested rects (count == 2): blur a shrunken version once (or
// fetch it from the mask cache) and report a center point so the caller can
// stretch the blurred mask over the full geometry.
// Returns kUnimplemented_FilterReturn to fall back to the general path.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
                                        const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    if (count < 1 || count > 2) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rects[0].roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    // First pass: compute only the blurred bounds/margin, not the pixels.
    bool filterResult = false;
    if (count == 1 && c_analyticBlurNinepatch) {
        // special case for fast rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
                                            SkMask::kJustComputeBounds_CreateMode);
    } else {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    /*
     *  smallR is the smallest version of 'rect' that will still guarantee that
     *  we get the same blur results on all edges, plus 1 center row/col that is
     *  representative of the extendible/stretchable edges of the ninepatch.
     *  Since our actual edge may be fractional we inset 1 more to be sure we
     *  don't miss any interior blur.
     *  x is an added pixel of blur, and { and } are the (fractional) edge
     *  pixels from the original rect.
     *
     *   x x { x x .... x x } x x
     *
     *  Thus, in this case, we inset by a total of 5 (on each side) beginning
     *  with our outer-rect (dstM.fBounds)
     */
    SkRect smallR[2];
    SkIPoint center;

    // +2 is from +1 for each edge (to account for possible fractional edges
    int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
    int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
    SkIRect innerIR;

    if (1 == count) {
        innerIR = srcM.fBounds;
        center.set(smallW, smallH);
    } else {
        SkASSERT(2 == count);
        rects[1].roundIn(&innerIR);
        center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
                   smallH + (innerIR.top() - srcM.fBounds.top()));
    }

    // +1 so we get a clean, stretchable, center row/col
    smallW += 1;
    smallH += 1;

    // we want the inset amounts to be integral, so we don't change any
    // fractional phase on the fRight or fBottom of our smallR.
    const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
    const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
    if (dx < 0 || dy < 0) {
        // we're too small, relative to our blur, to break into nine-patch,
        // so we ask to have our normal filterMask() be called.
        return kUnimplemented_FilterReturn;
    }

    // Shrink the rect(s) by (dx, dy) on the right/bottom only, preserving the
    // fractional phase of the edges.
    smallR[0].setLTRB(rects[0].left(), rects[0].top(),
                      rects[0].right() - dx, rects[0].bottom() - dy);
    if (smallR[0].width() < 2 || smallR[0].height() < 2) {
        return kUnimplemented_FilterReturn;
    }
    if (2 == count) {
        smallR[1].setLTRB(rects[1].left(), rects[1].top(),
                          rects[1].right() - dx, rects[1].bottom() - dy);
        SkASSERT(!smallR[1].isEmpty());
    }

    // Second pass: produce (or find in the cache) the blurred small mask.
    const SkScalar sigma = this->computeXformedSigma(matrix);
    SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
    if (!cache) {
        if (count > 1 || !c_analyticBlurNinepatch) {
            // Rasterize the small rect(s) and run the generic blur.
            if (!draw_rects_into_mask(smallR, count, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        } else {
            if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
    }
    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter = center;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}
689
computeFastBounds(const SkRect & src,SkRect * dst) const690 void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
691 SkRect* dst) const {
692 SkScalar pad = 3.0f * fSigma;
693
694 dst->setLTRB(src.fLeft - pad, src.fTop - pad,
695 src.fRight + pad, src.fBottom + pad);
696 }
697
// Deserialization entry point (registered via SK_FLATTENABLE_HOOKS); must read
// fields in the same order flatten() writes them.
sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
    const SkScalar sigma = buffer.readScalar();
    SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);

    uint32_t flags = buffer.read32LE(0x3); // historically we only recorded 2 bits
    bool respectCTM = !(flags & 1); // historically we stored ignoreCTM in low bit

    // Older pictures serialized an occluder rect; read and discard it.
    if (buffer.isVersionLT(SkPicturePriv::kRemoveOccluderFromBlurMaskFilter)) {
        SkRect unused;
        buffer.readRect(&unused);
    }

    return SkMaskFilter::MakeBlur((SkBlurStyle)style, sigma, respectCTM);
}
712
// Serializes sigma, style, and the (inverted) respect-CTM flag; must stay in
// sync with the read order in CreateProc() above.
void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
    buffer.writeScalar(fSigma);
    buffer.writeUInt(fBlurStyle);
    buffer.writeUInt(!fRespectCTM); // historically we recorded ignoreCTM
}
718
719
720 #if SK_SUPPORT_GPU
721
// GPU fast path: draws a normal-style blurred rect/circle/round-rect directly
// with an analytic coverage fragment processor, avoiding any mask texture.
// Returns false to fall back to the mask-based path.
bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrRecordingContext* context,
                                               GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const GrShape& shape) const {
    SkASSERT(renderTargetContext);

    // The analytic effects below only model the normal blur style.
    if (fBlurStyle != kNormal_SkBlurStyle) {
        return false;
    }

    if (!viewMatrix.isScaleTranslate()) {
        return false;
    }

    // TODO: we could handle blurred stroked circles
    if (!shape.style().isSimpleFill()) {
        return false;
    }

    SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
    if (xformedSigma <= 0) {
        return false;
    }

    // The shape must reduce to a (non-inverse) round rect.
    SkRRect srcRRect;
    bool inverted;
    if (!shape.asRRect(&srcRRect, nullptr, nullptr, &inverted) || inverted) {
        return false;
    }

    SkRRect devRRect;
    if (!srcRRect.transform(viewMatrix, &devRRect)) {
        return false;
    }

    if (!SkRRectPriv::AllCornersCircular(devRRect)) {
        return false;
    }

    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    std::unique_ptr<GrFragmentProcessor> fp;

    // Rects and circles have dedicated (cheaper) blur effects.
    if (devRRect.isRect() || SkRRectPriv::IsCircle(devRRect)) {
        if (devRRect.isRect()) {
            fp = GrRectBlurEffect::Make(proxyProvider, *context->priv().caps()->shaderCaps(),
                                        devRRect.rect(), xformedSigma);
        } else {
            fp = GrCircleBlurFragmentProcessor::Make(proxyProvider, devRRect.rect(), xformedSigma);
        }

        if (!fp) {
            return false;
        }
        paint.addCoverageFragmentProcessor(std::move(fp));

        // Cover the blurred extent: geometry outset by ~3 sigma.
        SkRect srcProxyRect = srcRRect.rect();
        SkScalar outsetX = 3.0f*fSigma;
        SkScalar outsetY = 3.0f*fSigma;
        if (this->ignoreXform()) {
            // When we're ignoring the CTM the padding added to the source rect also needs to ignore
            // the CTM. The matrix passed in here is guaranteed to be just scale and translate so we
            // can just grab the X and Y scales off the matrix and pre-undo the scale.
            outsetX /= SkScalarAbs(viewMatrix.getScaleX());
            outsetY /= SkScalarAbs(viewMatrix.getScaleY());
        }
        srcProxyRect.outset(outsetX, outsetY);

        renderTargetContext->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
        return true;
    }

    fp = GrRRectBlurEffect::Make(context, fSigma, xformedSigma, srcRRect, devRRect);
    if (!fp) {
        return false;
    }

    if (!this->ignoreXform()) {
        // Draw a quad of the outset source rect, transformed by the view matrix.
        SkRect srcProxyRect = srcRRect.rect();
        srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);

        SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, 4, 6, 0);
        srcProxyRect.toQuad(builder.positions());

        static const uint16_t fullIndices[6] = { 0, 1, 2, 0, 2, 3 };
        memcpy(builder.indices(), fullIndices, sizeof(fullIndices));
        sk_sp<SkVertices> vertices = builder.detach();

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->drawVertices(clip, std::move(paint), viewMatrix, std::move(vertices),
                                          nullptr, 0);
    } else {
        // Ignoring the CTM: draw the device-space rect with an inverse local
        // matrix so the effect's coordinates stay in device space.
        SkMatrix inverse;
        if (!viewMatrix.invert(&inverse)) {
            return false;
        }

        float extra=3.f*SkScalarCeilToScalar(xformedSigma-1/6.0f);
        SkRect proxyRect = devRRect.rect();
        proxyRect.outset(extra, extra);

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo,
                                                     SkMatrix::I(), proxyRect, inverse);
    }

    return true;
}
831
canFilterMaskGPU(const GrShape & shape,const SkIRect & devSpaceShapeBounds,const SkIRect & clipBounds,const SkMatrix & ctm,SkIRect * maskRect) const832 bool SkBlurMaskFilterImpl::canFilterMaskGPU(const GrShape& shape,
833 const SkIRect& devSpaceShapeBounds,
834 const SkIRect& clipBounds,
835 const SkMatrix& ctm,
836 SkIRect* maskRect) const {
837 SkScalar xformedSigma = this->computeXformedSigma(ctm);
838 if (xformedSigma <= 0) {
839 maskRect->setEmpty();
840 return false;
841 }
842
843 if (maskRect) {
844 float sigma3 = 3 * SkScalarToFloat(xformedSigma);
845
846 // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
847 SkIRect clipRect = clipBounds.makeOutset(sigma3, sigma3);
848 SkIRect srcRect = devSpaceShapeBounds.makeOutset(sigma3, sigma3);
849
850 if (!srcRect.intersect(clipRect)) {
851 srcRect.setEmpty();
852 }
853 *maskRect = srcRect;
854 }
855
856 // We prefer to blur paths with small blur radii on the CPU.
857 if (ctm.rectStaysRect()) {
858 static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
859 static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
860
861 if (devSpaceShapeBounds.width() <= kMIN_GPU_BLUR_SIZE &&
862 devSpaceShapeBounds.height() <= kMIN_GPU_BLUR_SIZE &&
863 xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
864 return false;
865 }
866 }
867
868 return true;
869 }
870
filterMaskGPU(GrRecordingContext * context,sk_sp<GrTextureProxy> srcProxy,GrColorType srcColorType,SkAlphaType srcAlphaType,const SkMatrix & ctm,const SkIRect & maskRect) const871 sk_sp<GrTextureProxy> SkBlurMaskFilterImpl::filterMaskGPU(GrRecordingContext* context,
872 sk_sp<GrTextureProxy> srcProxy,
873 GrColorType srcColorType,
874 SkAlphaType srcAlphaType,
875 const SkMatrix& ctm,
876 const SkIRect& maskRect) const {
877 // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
878 const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
879
880 SkScalar xformedSigma = this->computeXformedSigma(ctm);
881 SkASSERT(xformedSigma > 0);
882
883 // If we're doing a normal blur, we can clobber the pathTexture in the
884 // gaussianBlur. Otherwise, we need to save it for later compositing.
885 bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
886 auto renderTargetContext = SkGpuBlurUtils::GaussianBlur(context,
887 srcProxy,
888 srcColorType,
889 srcAlphaType,
890 SkIPoint::Make(0, 0),
891 nullptr,
892 clipRect,
893 SkIRect::EmptyIRect(),
894 xformedSigma,
895 xformedSigma,
896 GrTextureDomain::kIgnore_Mode);
897 if (!renderTargetContext) {
898 return nullptr;
899 }
900
901 if (!isNormalBlur) {
902 GrPaint paint;
903 // Blend pathTexture over blurTexture.
904 paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(std::move(srcProxy),
905 srcColorType,
906 SkMatrix::I()));
907 if (kInner_SkBlurStyle == fBlurStyle) {
908 // inner: dst = dst * src
909 paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
910 } else if (kSolid_SkBlurStyle == fBlurStyle) {
911 // solid: dst = src + dst - src * dst
912 // = src + (1 - src) * dst
913 paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
914 } else if (kOuter_SkBlurStyle == fBlurStyle) {
915 // outer: dst = dst * (1 - src)
916 // = 0 * src + (1 - src) * dst
917 paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
918 } else {
919 paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
920 }
921
922 renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
923 SkRect::Make(clipRect));
924 }
925
926 return renderTargetContext->asTextureProxyRef();
927 }
928
929 #endif // SK_SUPPORT_GPU
930
sk_register_blur_maskfilter_createproc()931 void sk_register_blur_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(SkBlurMaskFilterImpl); }
932
MakeBlur(SkBlurStyle style,SkScalar sigma,bool respectCTM)933 sk_sp<SkMaskFilter> SkMaskFilter::MakeBlur(SkBlurStyle style, SkScalar sigma, bool respectCTM) {
934 if (SkScalarIsFinite(sigma) && sigma > 0) {
935 return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, respectCTM));
936 }
937 return nullptr;
938 }
939