1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "SkArenaAlloc.h"
9 #include "SkBitmapController.h"
10 #include "SkBitmapProcShader.h"
11 #include "SkBitmapProvider.h"
12 #include "SkEmptyShader.h"
13 #include "SkImage_Base.h"
14 #include "SkImageShader.h"
15 #include "SkPM4fPriv.h"
16 #include "SkReadBuffer.h"
17 #include "SkWriteBuffer.h"
18 #include "../jumper/SkJumper.h"
19
20 /**
21 * We are faster in clamp, so always use that tiling when we can.
22 */
optimize(SkShader::TileMode tm,int dimension)23 static SkShader::TileMode optimize(SkShader::TileMode tm, int dimension) {
24 SkASSERT(dimension > 0);
25 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
26 // need to update frameworks/base/libs/hwui/tests/unit/SkiaBehaviorTests.cpp:55 to allow
27 // for transforming to clamp.
28 return tm;
29 #else
30 return dimension == 1 ? SkShader::kClamp_TileMode : tm;
31 #endif
32 }
33
// Constructs an image shader. Tile modes are run through optimize() so a
// 1-pixel dimension can degrade to the (faster) clamp mode where allowed.
// fClampAsIfUnpremul is an internal-only knob (false via public APIs).
SkImageShader::SkImageShader(sk_sp<SkImage> img,
                             TileMode tmx, TileMode tmy,
                             const SkMatrix* localMatrix,
                             bool clampAsIfUnpremul)
    : INHERITED(localMatrix)
    , fImage(std::move(img))  // fImage is initialized before the tile modes below use it
    , fTileModeX(optimize(tmx, fImage->width()))
    , fTileModeY(optimize(tmy, fImage->height()))
    , fClampAsIfUnpremul(clampAsIfUnpremul)
{}
44
45 // fClampAsIfUnpremul is always false when constructed through public APIs,
46 // so there's no need to read or write it here.
47
CreateProc(SkReadBuffer & buffer)48 sk_sp<SkFlattenable> SkImageShader::CreateProc(SkReadBuffer& buffer) {
49 const TileMode tx = (TileMode)buffer.readUInt();
50 const TileMode ty = (TileMode)buffer.readUInt();
51 SkMatrix localMatrix;
52 buffer.readMatrix(&localMatrix);
53 sk_sp<SkImage> img = buffer.readImage();
54 if (!img) {
55 return nullptr;
56 }
57 return SkImageShader::Make(std::move(img), tx, ty, &localMatrix);
58 }
59
// Serializes this shader. Field order must stay in sync with CreateProc().
void SkImageShader::flatten(SkWriteBuffer& buffer) const {
    buffer.writeUInt(fTileModeX);
    buffer.writeUInt(fTileModeY);
    buffer.writeMatrix(this->getLocalMatrix());
    buffer.writeImage(fImage.get());
    // fClampAsIfUnpremul is always false when constructed through public APIs,
    // so it is deliberately not serialized; assert that invariant here.
    SkASSERT(fClampAsIfUnpremul == false);
}
67
isOpaque() const68 bool SkImageShader::isOpaque() const {
69 return fImage->isOpaque() && fTileModeX != kDecal_TileMode && fTileModeY != kDecal_TileMode;
70 }
71
legacy_shader_can_handle(const SkMatrix & a,const SkMatrix & b)72 static bool legacy_shader_can_handle(const SkMatrix& a, const SkMatrix& b) {
73 SkMatrix m = SkMatrix::Concat(a, b);
74 if (!m.isScaleTranslate()) {
75 return false;
76 }
77
78 SkMatrix inv;
79 if (!m.invert(&inv)) {
80 return false;
81 }
82
83 // legacy code uses SkFixed 32.32, so ensure the inverse doesn't map device coordinates
84 // out of range.
85 const SkScalar max_dev_coord = 32767.0f;
86 SkRect src;
87 SkAssertResult(inv.mapRect(&src, SkRect::MakeWH(max_dev_coord, max_dev_coord)));
88
89 // take 1/2 of max signed 32bits so we have room to subtract coordinates
90 const SkScalar max_fixed32dot32 = SK_MaxS32 * 0.5f;
91 if (!SkRect::MakeLTRB(-max_fixed32dot32, -max_fixed32dot32,
92 max_fixed32dot32, max_fixed32dot32).contains(src)) {
93 return false;
94 }
95
96 // legacy shader impl should be able to handle these matrices
97 return true;
98 }
99
IsRasterPipelineOnly(const SkMatrix & ctm,SkColorType ct,SkAlphaType at,SkShader::TileMode tx,SkShader::TileMode ty,const SkMatrix & localM)100 bool SkImageShader::IsRasterPipelineOnly(const SkMatrix& ctm, SkColorType ct, SkAlphaType at,
101 SkShader::TileMode tx, SkShader::TileMode ty,
102 const SkMatrix& localM) {
103 if (ct != kN32_SkColorType) {
104 return true;
105 }
106 if (at == kUnpremul_SkAlphaType) {
107 return true;
108 }
109 #ifndef SK_SUPPORT_LEGACY_TILED_BITMAPS
110 if (tx != ty) {
111 return true;
112 }
113 #endif
114 if (tx == kDecal_TileMode || ty == kDecal_TileMode) {
115 return true;
116 }
117 if (!legacy_shader_can_handle(ctm, localM)) {
118 return true;
119 }
120 return false;
121 }
122
onIsRasterPipelineOnly(const SkMatrix & ctm) const123 bool SkImageShader::onIsRasterPipelineOnly(const SkMatrix& ctm) const {
124 SkBitmapProvider provider(fImage.get(), nullptr);
125 return IsRasterPipelineOnly(ctm, provider.info().colorType(), provider.info().alphaType(),
126 fTileModeX, fTileModeY, this->getLocalMatrix());
127 }
128
onMakeContext(const ContextRec & rec,SkArenaAlloc * alloc) const129 SkShaderBase::Context* SkImageShader::onMakeContext(const ContextRec& rec,
130 SkArenaAlloc* alloc) const {
131 return SkBitmapProcLegacyShader::MakeContext(*this, fTileModeX, fTileModeY,
132 SkBitmapProvider(fImage.get(), rec.fDstColorSpace),
133 rec, alloc);
134 }
135
onIsAImage(SkMatrix * texM,TileMode xy[]) const136 SkImage* SkImageShader::onIsAImage(SkMatrix* texM, TileMode xy[]) const {
137 if (texM) {
138 *texM = this->getLocalMatrix();
139 }
140 if (xy) {
141 xy[0] = (TileMode)fTileModeX;
142 xy[1] = (TileMode)fTileModeY;
143 }
144 return const_cast<SkImage*>(fImage.get());
145 }
146
#ifdef SK_SUPPORT_LEGACY_SHADER_ISABITMAP
// Legacy query: succeeds only when the image is backed by a raster bitmap,
// in which case the optional out-params (if non-null) are filled in.
bool SkImageShader::onIsABitmap(SkBitmap* texture, SkMatrix* texM, TileMode xy[]) const {
    const SkBitmap* bitmap = as_IB(fImage)->onPeekBitmap();
    if (bitmap == nullptr) {
        return false;
    }

    if (texture != nullptr) {
        *texture = *bitmap;
    }
    if (texM != nullptr) {
        *texM = this->getLocalMatrix();
    }
    if (xy != nullptr) {
        xy[0] = (TileMode)fTileModeX;
        xy[1] = (TileMode)fTileModeY;
    }
    return true;
}
#endif
167
// Returns true if either dimension exceeds what the legacy sampler can index.
static bool bitmap_is_too_big(int w, int h) {
    // SkBitmapProcShader stores bitmap coordinates in a 16bit buffer, as it
    // communicates between its matrix-proc and its sampler-proc. Until we can
    // widen that, we have to reject bitmaps that are larger.
    static const int kMaxSize = 65535;

    return !(w <= kMaxSize && h <= kMaxSize);
}
177
Make(sk_sp<SkImage> image,TileMode tx,TileMode ty,const SkMatrix * localMatrix,bool clampAsIfUnpremul)178 sk_sp<SkShader> SkImageShader::Make(sk_sp<SkImage> image,
179 TileMode tx, TileMode ty,
180 const SkMatrix* localMatrix,
181 bool clampAsIfUnpremul) {
182 if (!image || bitmap_is_too_big(image->width(), image->height())) {
183 return sk_make_sp<SkEmptyShader>();
184 }
185 return sk_sp<SkShader>{ new SkImageShader(image, tx,ty, localMatrix, clampAsIfUnpremul) };
186 }
187
188 #ifndef SK_IGNORE_TO_STRING
toString(SkString * str) const189 void SkImageShader::toString(SkString* str) const {
190 const char* gTileModeName[SkShader::kTileModeCount] = {
191 "clamp", "repeat", "mirror"
192 };
193
194 str->appendf("ImageShader: ((%s %s) ", gTileModeName[fTileModeX], gTileModeName[fTileModeY]);
195 fImage->toString(str);
196 this->INHERITED::toString(str);
197 str->append(")");
198 }
199 #endif
200
201 ///////////////////////////////////////////////////////////////////////////////////////////////////
202
203 #if SK_SUPPORT_GPU
204
205 #include "GrColorSpaceInfo.h"
206 #include "GrContext.h"
207 #include "GrContextPriv.h"
208 #include "SkGr.h"
209 #include "effects/GrBicubicEffect.h"
210 #include "effects/GrSimpleTextureEffect.h"
211
// Maps an SkShader tile mode to the corresponding GPU sampler wrap mode.
static GrSamplerState::WrapMode tile_mode_to_wrap_mode(const SkShader::TileMode tileMode) {
    switch (tileMode) {
        case SkShader::TileMode::kClamp_TileMode:
            return GrSamplerState::WrapMode::kClamp;
        case SkShader::TileMode::kRepeat_TileMode:
            return GrSamplerState::WrapMode::kRepeat;
        case SkShader::TileMode::kMirror_TileMode:
            return GrSamplerState::WrapMode::kMirrorRepeat;
        case SkShader::kDecal_TileMode:
            // TODO: depending on caps, we should extend WrapMode for decal...
            return GrSamplerState::WrapMode::kClamp;
    }
    SK_ABORT("Unknown tile mode.");
    return GrSamplerState::WrapMode::kClamp;  // unreachable; placates the compiler
}
227
// Creates the GPU fragment processor for this shader: obtains a texture proxy
// for the image and wires up filtering, tiling, and color-space conversion.
// Returns nullptr if a required matrix can't be inverted or no proxy can be
// made.
std::unique_ptr<GrFragmentProcessor> SkImageShader::asFragmentProcessor(
        const GrFPArgs& args) const {
    SkMatrix lm = this->getLocalMatrix();
    SkMatrix lmInverse;
    if (!lm.invert(&lmInverse)) {
        return nullptr;
    }
    if (args.fLocalMatrix) {
        // Fold the caller-supplied local matrix into both the inverse (fed to
        // the effects) and the forward matrix (used for filter-quality checks).
        SkMatrix inv;
        if (!args.fLocalMatrix->invert(&inv)) {
            return nullptr;
        }
        lmInverse.postConcat(inv);
        lm.preConcat(*args.fLocalMatrix);
    }

    GrSamplerState::WrapMode wrapModes[] = {tile_mode_to_wrap_mode(fTileModeX),
                                            tile_mode_to_wrap_mode(fTileModeY)};

    // Must set wrap and filter on the sampler before requesting a texture. In two places below
    // we check the matrix scale factors to determine how to interpret the filter quality setting.
    // This completely ignores the complexity of the drawVertices case where explicit local coords
    // are provided by the caller.
    bool doBicubic;
    GrSamplerState::Filter textureFilterMode = GrSkFilterQualityToGrFilterMode(
            args.fFilterQuality, *args.fViewMatrix, lm,
            args.fContext->contextPriv().sharpenMipmappedTextures(), &doBicubic);
    GrSamplerState samplerState(wrapModes, textureFilterMode);
    sk_sp<SkColorSpace> texColorSpace;
    SkScalar scaleAdjust[2] = { 1.0f, 1.0f };
    sk_sp<GrTextureProxy> proxy(as_IB(fImage)->asTextureProxyRef(
            args.fContext, samplerState, args.fDstColorSpaceInfo->colorSpace(), &texColorSpace,
            scaleAdjust));
    if (!proxy) {
        return nullptr;
    }

    GrPixelConfig config = proxy->config();
    bool isAlphaOnly = GrPixelConfigIsAlphaOnly(config);

    // The proxy may have been created at an adjusted scale; compensate.
    lmInverse.postScale(scaleAdjust[0], scaleAdjust[1]);

    std::unique_ptr<GrFragmentProcessor> inner;
    if (doBicubic) {
        inner = GrBicubicEffect::Make(std::move(proxy), lmInverse, wrapModes);
    } else {
        inner = GrSimpleTextureEffect::Make(std::move(proxy), lmInverse, samplerState);
    }
    // Convert from the texture's color space to the destination's.
    inner = GrColorSpaceXformEffect::Make(std::move(inner), texColorSpace.get(), config,
                                          args.fDstColorSpaceInfo->colorSpace());
    if (isAlphaOnly) {
        return inner;
    }
    // Color images modulate by the input (paint) alpha.
    return GrFragmentProcessor::MulChildByInputAlpha(std::move(inner));
}
283
284 #endif
285
286 ///////////////////////////////////////////////////////////////////////////////////////////////////
287 #include "SkImagePriv.h"
288
SkMakeBitmapShader(const SkBitmap & src,SkShader::TileMode tmx,SkShader::TileMode tmy,const SkMatrix * localMatrix,SkCopyPixelsMode cpm)289 sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkShader::TileMode tmx,
290 SkShader::TileMode tmy, const SkMatrix* localMatrix,
291 SkCopyPixelsMode cpm) {
292 return SkImageShader::Make(SkMakeImageFromRasterBitmap(src, cpm),
293 tmx, tmy, localMatrix);
294 }
295
// Registers SkImageShader's CreateProc with the flattenable (serialization)
// factory for the SkShaderBase group.
SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkShaderBase)
    SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkImageShader)
SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
299
// Builds the SkRasterPipeline stages that sample this image shader:
// seed -> matrix -> (tiling -> gather [-> srgb]) x N taps -> misc fixups.
// Returns false if the total inverse matrix can't be computed or the bitmap
// controller can't produce pixels to sample.
bool SkImageShader::onAppendStages(const StageRec& rec) const {
    SkRasterPipeline* p = rec.fPipeline;
    SkArenaAlloc* alloc = rec.fAlloc;

    SkMatrix matrix;
    if (!this->computeTotalInverse(rec.fCTM, rec.fLocalM, &matrix)) {
        return false;
    }
    auto quality = rec.fPaint.getFilterQuality();

    // Let the bitmap controller choose the actual pixels to sample; it may
    // adjust the matrix and the effective quality.
    SkBitmapProvider provider(fImage.get(), rec.fDstCS);
    SkDefaultBitmapController controller;
    std::unique_ptr<SkBitmapController::State> state {
        controller.requestBitmap(provider, matrix, quality)
    };
    if (!state) {
        return false;
    }

    const SkPixmap& pm = state->pixmap();
    matrix = state->invMatrix();
    quality = state->quality();
    auto info = pm.info();

    // When the matrix is just an integer translate, bilerp == nearest neighbor.
    if (quality == kLow_SkFilterQuality &&
        matrix.getType() <= SkMatrix::kTranslate_Mask &&
        matrix.getTranslateX() == (int)matrix.getTranslateX() &&
        matrix.getTranslateY() == (int)matrix.getTranslateY()) {
        quality = kNone_SkFilterQuality;
    }

    // See skia:4649 and the GM image_scale_aligned.
    // Nudge the translate down by one ulp so nearest-neighbor sampling picks
    // consistent pixel centers.
    if (quality == kNone_SkFilterQuality) {
        if (matrix.getScaleX() >= 0) {
            matrix.setTranslateX(nextafterf(matrix.getTranslateX(),
                                            floorf(matrix.getTranslateX())));
        }
        if (matrix.getScaleY() >= 0) {
            matrix.setTranslateY(nextafterf(matrix.getTranslateY(),
                                            floorf(matrix.getTranslateY())));
        }
    }

    p->append_seed_shader();

    // Bundles state whose lifetime must match the pipeline's.
    struct MiscCtx {
        std::unique_ptr<SkBitmapController::State> state;
        SkColor4f paint_color;
    };
    auto misc = alloc->make<MiscCtx>();
    misc->state = std::move(state);  // Extend lifetime to match the pipeline's.
    misc->paint_color = SkColor4f_from_SkColor(rec.fPaint.getColor(), rec.fDstCS);
    p->append_matrix(alloc, matrix);

    // Context for the gather_* stages: raw pixel access parameters.
    auto gather = alloc->make<SkJumper_GatherCtx>();
    gather->pixels = pm.addr();
    gather->stride = pm.rowBytesAsPixels();
    gather->width = pm.width();
    gather->height = pm.height();

    // Per-axis limits for the repeat/mirror tiling stages.
    auto limit_x = alloc->make<SkJumper_TileCtx>(),
         limit_y = alloc->make<SkJumper_TileCtx>();
    limit_x->scale = pm.width();
    limit_x->invScale = 1.0f / pm.width();
    limit_y->scale = pm.height();
    limit_y->invScale = 1.0f / pm.height();

    bool is_srgb = rec.fDstCS && (!info.colorSpace() || info.gammaCloseToSRGB());

    // Decal needs its own context (and a combined stage when both axes decal).
    SkJumper_DecalTileCtx* decal_ctx = nullptr;
    bool decal_x_and_y = fTileModeX == kDecal_TileMode && fTileModeY == kDecal_TileMode;
    if (fTileModeX == kDecal_TileMode || fTileModeY == kDecal_TileMode) {
        decal_ctx = alloc->make<SkJumper_DecalTileCtx>();
        decal_ctx->limit_x = limit_x->scale;
        decal_ctx->limit_y = limit_y->scale;
    }

    // Appends per-axis tiling, a gather for the image's color type, an
    // optional decal mask check, and (if needed) sRGB->linear conversion.
    auto append_tiling_and_gather = [&] {
        if (decal_x_and_y) {
            p->append(SkRasterPipeline::decal_x_and_y, decal_ctx);
        } else {
            switch (fTileModeX) {
                case kClamp_TileMode: /* The gather_xxx stage will clamp for us. */ break;
                case kMirror_TileMode: p->append(SkRasterPipeline::mirror_x, limit_x); break;
                case kRepeat_TileMode: p->append(SkRasterPipeline::repeat_x, limit_x); break;
                case kDecal_TileMode: p->append(SkRasterPipeline::decal_x, decal_ctx); break;
            }
            switch (fTileModeY) {
                case kClamp_TileMode: /* The gather_xxx stage will clamp for us. */ break;
                case kMirror_TileMode: p->append(SkRasterPipeline::mirror_y, limit_y); break;
                case kRepeat_TileMode: p->append(SkRasterPipeline::repeat_y, limit_y); break;
                case kDecal_TileMode: p->append(SkRasterPipeline::decal_y, decal_ctx); break;
            }
        }

        void* ctx = gather;
        switch (info.colorType()) {
            case kAlpha_8_SkColorType: p->append(SkRasterPipeline::gather_a8, ctx); break;
            case kGray_8_SkColorType: p->append(SkRasterPipeline::gather_g8, ctx); break;
            case kRGB_565_SkColorType: p->append(SkRasterPipeline::gather_565, ctx); break;
            case kARGB_4444_SkColorType: p->append(SkRasterPipeline::gather_4444, ctx); break;
            case kBGRA_8888_SkColorType: p->append(SkRasterPipeline::gather_bgra, ctx); break;
            case kRGBA_8888_SkColorType: p->append(SkRasterPipeline::gather_8888, ctx); break;
            case kRGBA_1010102_SkColorType: p->append(SkRasterPipeline::gather_1010102, ctx); break;
            case kRGBA_F16_SkColorType: p->append(SkRasterPipeline::gather_f16, ctx); break;

            // The x-variants share a gather with their alpha-carrying siblings,
            // then force alpha to opaque.
            case kRGB_888x_SkColorType: p->append(SkRasterPipeline::gather_8888, ctx);
                                        p->append(SkRasterPipeline::force_opaque ); break;
            case kRGB_101010x_SkColorType: p->append(SkRasterPipeline::gather_1010102, ctx);
                                           p->append(SkRasterPipeline::force_opaque ); break;

            default: SkASSERT(false);
        }
        if (decal_ctx) {
            // Zero out samples that fell outside the decal bounds.
            p->append(SkRasterPipeline::check_decal_mask, decal_ctx);
        }
        if (is_srgb) {
            p->append(SkRasterPipeline::from_srgb);
        }
    };

    // Appends the final fixups: alpha-8 colorization, premultiplication,
    // bicubic clamping, and the gamut transform to the destination space.
    auto append_misc = [&] {
        if (info.colorType() == kAlpha_8_SkColorType) {
            // Alpha-only images are tinted with the paint color.
            p->append(SkRasterPipeline::set_rgb, &misc->paint_color);
        }
        if (info.colorType() == kAlpha_8_SkColorType ||
            info.alphaType() == kUnpremul_SkAlphaType) {
            p->append(SkRasterPipeline::premul);
        }
        if (quality > kLow_SkFilterQuality) {
            // Bicubic filtering naturally produces out of range values on both sides.
            p->append(SkRasterPipeline::clamp_0);
            p->append(fClampAsIfUnpremul ? SkRasterPipeline::clamp_1
                                         : SkRasterPipeline::clamp_a);
        }
        append_gamut_transform(p, alloc, info.colorSpace(), rec.fDstCS,
                               fClampAsIfUnpremul ? kUnpremul_SkAlphaType : kPremul_SkAlphaType);
        return true;
    };

    // Fast path: a single fused stage handles bilerp over clamped 8888 pixels.
    if (quality == kLow_SkFilterQuality &&
        (info.colorType() == kRGBA_8888_SkColorType ||
         info.colorType() == kBGRA_8888_SkColorType) &&
        fTileModeX == SkShader::kClamp_TileMode &&
        fTileModeY == SkShader::kClamp_TileMode &&
        !is_srgb) {

        p->append(SkRasterPipeline::bilerp_clamp_8888, gather);
        if (info.colorType() == kBGRA_8888_SkColorType) {
            p->append(SkRasterPipeline::swap_rb);
        }
        return append_misc();
    }

    // Multi-tap filtering needs a sampler context shared across the taps.
    SkJumper_SamplerCtx* sampler = nullptr;
    if (quality != kNone_SkFilterQuality) {
        sampler = alloc->make<SkJumper_SamplerCtx>();
    }

    // Appends one filter tap: offset x/y, tile+gather, and accumulate.
    auto sample = [&](SkRasterPipeline::StockStage setup_x,
                      SkRasterPipeline::StockStage setup_y) {
        p->append(setup_x, sampler);
        p->append(setup_y, sampler);
        append_tiling_and_gather();
        p->append(SkRasterPipeline::accumulate, sampler);
    };

    if (quality == kNone_SkFilterQuality) {
        // Nearest neighbor: a single tap, no sampler needed.
        append_tiling_and_gather();

    } else if (quality == kLow_SkFilterQuality) {
        // Bilinear: accumulate the 2x2 neighborhood.
        p->append(SkRasterPipeline::save_xy, sampler);

        sample(SkRasterPipeline::bilinear_nx, SkRasterPipeline::bilinear_ny);
        sample(SkRasterPipeline::bilinear_px, SkRasterPipeline::bilinear_ny);
        sample(SkRasterPipeline::bilinear_nx, SkRasterPipeline::bilinear_py);
        sample(SkRasterPipeline::bilinear_px, SkRasterPipeline::bilinear_py);

        p->append(SkRasterPipeline::move_dst_src);

    } else {
        // Bicubic: accumulate the full 4x4 neighborhood.
        p->append(SkRasterPipeline::save_xy, sampler);

        sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_n3y);
        sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_n3y);
        sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_n3y);
        sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_n3y);

        sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_n1y);
        sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_n1y);
        sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_n1y);
        sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_n1y);

        sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_p1y);
        sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_p1y);
        sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_p1y);
        sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_p1y);

        sample(SkRasterPipeline::bicubic_n3x, SkRasterPipeline::bicubic_p3y);
        sample(SkRasterPipeline::bicubic_n1x, SkRasterPipeline::bicubic_p3y);
        sample(SkRasterPipeline::bicubic_p1x, SkRasterPipeline::bicubic_p3y);
        sample(SkRasterPipeline::bicubic_p3x, SkRasterPipeline::bicubic_p3y);

        p->append(SkRasterPipeline::move_dst_src);
    }

    return append_misc();
}
509