1 /*
2  * Copyright 2014 The Android Open Source Project
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SkColor_opts_SSE2_DEFINED
9 #define SkColor_opts_SSE2_DEFINED
10 
11 #include <emmintrin.h>
12 
13 #define ASSERT_EQ(a,b) SkASSERT(0xffff == _mm_movemask_epi8(_mm_cmpeq_epi8((a), (b))))
14 
// Because SSE2 has no _mm_mul_epi32(), we emulate it here.
// Multiplies 4 32-bit integers from a by 4 32-bit integers from b.
// Each of the 4 products must be representable within a 32-bit
// integer, otherwise it would overflow.
static inline __m128i Multiply32_SSE2(const __m128i& a, const __m128i& b) {
    // Even lanes: 64-bit products of lanes 0 and 2.
    const __m128i even = _mm_mul_epu32(a, b);
    // Odd lanes: shift lanes 1 and 3 down into the even positions, then multiply.
    const __m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));
    // Compact the low 32 bits of each 64-bit product, then re-interleave
    // so lane i of the result holds a[i] * b[i].
    return _mm_unpacklo_epi32(_mm_shuffle_epi32(even, _MM_SHUFFLE(0,0,2,0)),
                              _mm_shuffle_epi32(odd,  _MM_SHUFFLE(0,0,2,0)));
}
29 
// Widens a 0..255 alpha (one per 32-bit lane) into a 0..256 scale factor,
// matching the portable SkAlpha255To256(): alpha + 1.
static inline __m128i SkAlpha255To256_SSE2(const __m128i& alpha) {
    const __m128i one = _mm_set1_epi32(1);
    return _mm_add_epi32(alpha, one);
}
33 
// See #define SkAlphaMulAlpha(a, b)  SkMulDiv255Round(a, b) in SkXfermode.cpp.
// Per 32-bit lane: a rounded divide-by-255 of the product,
// (a*b + 128 + ((a*b + 128) >> 8)) >> 8. Lanes must hold 0..255 values.
static inline __m128i SkAlphaMulAlpha_SSE2(const __m128i& a,
                                           const __m128i& b) {
    __m128i p = _mm_add_epi32(_mm_mullo_epi16(a, b), _mm_set1_epi32(128));
    p = _mm_add_epi32(p, _mm_srli_epi32(p, 8));
    return _mm_srli_epi32(p, 8);
}
44 
45 // Portable version SkAlphaMulQ is in SkColorPriv.h.
SkAlphaMulQ_SSE2(const __m128i & c,const __m128i & scale)46 static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const __m128i& scale) {
47     const __m128i mask = _mm_set1_epi32(0xFF00FF);
48     __m128i s = _mm_or_si128(_mm_slli_epi32(scale, 16), scale);
49 
50     // uint32_t rb = ((c & mask) * scale) >> 8
51     __m128i rb = _mm_and_si128(mask, c);
52     rb = _mm_mullo_epi16(rb, s);
53     rb = _mm_srli_epi16(rb, 8);
54 
55     // uint32_t ag = ((c >> 8) & mask) * scale
56     __m128i ag = _mm_srli_epi16(c, 8);
57     ASSERT_EQ(ag, _mm_and_si128(mask, ag));  // ag = _mm_srli_epi16(c, 8) did this for us.
58     ag = _mm_mullo_epi16(ag, s);
59 
60     // (rb & mask) | (ag & ~mask)
61     ASSERT_EQ(rb, _mm_and_si128(mask, rb));  // rb = _mm_srli_epi16(rb, 8) did this for us.
62     ag = _mm_andnot_si128(mask, ag);
63     return _mm_or_si128(rb, ag);
64 }
65 
// Fast path for SkAlphaMulQ_SSE2 with a constant scale factor.
static inline __m128i SkAlphaMulQ_SSE2(const __m128i& c, const unsigned scale) {
    const __m128i mask = _mm_set1_epi32(0xFF00FF);
    // Put the scale in the high byte of each 16-bit word so that mulhi
    // computes (x * scale) >> 8 in one step.
    const __m128i s = _mm_set1_epi16(scale << 8);

    // Red/blue words come out of mulhi already in their low-byte positions.
    __m128i rb = _mm_mulhi_epu16(_mm_and_si128(mask, c), s);

    // Alpha/green products land in the high byte of each word; keep them there.
    __m128i ag = _mm_mulhi_epu16(_mm_andnot_si128(mask, c), s);
    ag = _mm_andnot_si128(mask, ag);

    return _mm_or_si128(rb, ag);
}
82 
// Portable version SkFastFourByteInterp256 is in SkColorPriv.h.
// Returns dst + (((src - dst) * src_scale) >> 8) per channel,
// with src_scale in [0, 256].
static inline __m128i SkFastFourByteInterp256_SSE2(const __m128i& src, const __m128i& dst, const unsigned src_scale) {
    const __m128i lo_mask = _mm_set1_epi32(0x00FF00FF);
    const __m128i scale = _mm_set1_epi16(src_scale);

    // Splay each pixel into 16-bit red/blue and alpha/green words.
    const __m128i s_rb = _mm_and_si128(lo_mask, src);
    const __m128i s_ag = _mm_srli_epi16(src, 8);
    const __m128i d_rb = _mm_and_si128(lo_mask, dst);
    const __m128i d_ag = _mm_srli_epi16(dst, 8);

    // Scale the (possibly negative) channel differences.
    __m128i diff_rb = _mm_mullo_epi16(_mm_sub_epi16(s_rb, d_rb), scale);
    __m128i diff_ag = _mm_mullo_epi16(_mm_sub_epi16(s_ag, d_ag), scale);

    // Divide the rb half by 256 and merge the halves back into whole pixels.
    diff_rb = _mm_srli_epi16(diff_rb, 8);
    diff_ag = _mm_andnot_si128(lo_mask, diff_ag);
    const __m128i diff = _mm_or_si128(diff_rb, diff_ag);

    // Byte-wise add matches the portable code's wrapping arithmetic.
    return _mm_add_epi8(dst, diff);
}
109 
// Portable version SkPMLerp is in SkColorPriv.h
// Lerps between premultiplied src and dst pixels with scale in [0, 256]:
// dst + ((src - dst) * scale >> 8).
static inline __m128i SkPMLerp_SSE2(const __m128i& src, const __m128i& dst, const unsigned scale) {
#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
    // Legacy behavior: scale src and dst separately, then add. Each term is
    // truncated on its own, so results can differ slightly from the path below.
    return _mm_add_epi8(SkAlphaMulQ_SSE2(src, scale), SkAlphaMulQ_SSE2(dst, 256 - scale));
#else
    return SkFastFourByteInterp256_SSE2(src, dst, scale);
#endif
}
118 
SkGetPackedA32_SSE2(const __m128i & src)119 static inline __m128i SkGetPackedA32_SSE2(const __m128i& src) {
120 #if SK_A32_SHIFT == 24                // It's very common (universal?) that alpha is the top byte.
121     return _mm_srli_epi32(src, 24);   // You'd hope the compiler would remove the left shift then,
122 #else                                 // but I've seen Clang just do a dumb left shift of zero. :(
123     __m128i a = _mm_slli_epi32(src, (24 - SK_A32_SHIFT));
124     return _mm_srli_epi32(a, 24);
125 #endif
126 }
127 
SkGetPackedR32_SSE2(const __m128i & src)128 static inline __m128i SkGetPackedR32_SSE2(const __m128i& src) {
129     __m128i r = _mm_slli_epi32(src, (24 - SK_R32_SHIFT));
130     return _mm_srli_epi32(r, 24);
131 }
132 
SkGetPackedG32_SSE2(const __m128i & src)133 static inline __m128i SkGetPackedG32_SSE2(const __m128i& src) {
134     __m128i g = _mm_slli_epi32(src, (24 - SK_G32_SHIFT));
135     return _mm_srli_epi32(g, 24);
136 }
137 
SkGetPackedB32_SSE2(const __m128i & src)138 static inline __m128i SkGetPackedB32_SSE2(const __m128i& src) {
139     __m128i b = _mm_slli_epi32(src, (24 - SK_B32_SHIFT));
140     return _mm_srli_epi32(b, 24);
141 }
142 
// Rounded multiply-and-shift on eight 16-bit lanes, mirroring the scalar
// SkMul16ShiftRound: (p + half + ((p + half) >> shift)) >> shift with
// p = a*b and half = 1 << (shift - 1). Products must fit in 16 bits.
static inline __m128i SkMul16ShiftRound_SSE2(const __m128i& a,
                                             const __m128i& b, int shift) {
    const __m128i half = _mm_set1_epi16(1 << (shift - 1));
    __m128i p = _mm_add_epi16(_mm_mullo_epi16(a, b), half);
    p = _mm_add_epi16(p, _mm_srli_epi16(p, shift));
    return _mm_srli_epi16(p, shift);
}
152 
SkPackRGB16_SSE2(const __m128i & r,const __m128i & g,const __m128i & b)153 static inline __m128i SkPackRGB16_SSE2(const __m128i& r,
154                                        const __m128i& g, const __m128i& b) {
155     __m128i dr = _mm_slli_epi16(r, SK_R16_SHIFT);
156     __m128i dg = _mm_slli_epi16(g, SK_G16_SHIFT);
157     __m128i db = _mm_slli_epi16(b, SK_B16_SHIFT);
158 
159     __m128i c = _mm_or_si128(dr, dg);
160     return _mm_or_si128(c, db);
161 }
162 
SkPackARGB32_SSE2(const __m128i & a,const __m128i & r,const __m128i & g,const __m128i & b)163 static inline __m128i SkPackARGB32_SSE2(const __m128i& a, const __m128i& r,
164                                         const __m128i& g, const __m128i& b) {
165     __m128i da = _mm_slli_epi32(a, SK_A32_SHIFT);
166     __m128i dr = _mm_slli_epi32(r, SK_R32_SHIFT);
167     __m128i dg = _mm_slli_epi32(g, SK_G32_SHIFT);
168     __m128i db = _mm_slli_epi32(b, SK_B32_SHIFT);
169 
170     __m128i c = _mm_or_si128(da, dr);
171     c = _mm_or_si128(c, dg);
172     return _mm_or_si128(c, db);
173 }
174 
SkPacked16ToR32_SSE2(const __m128i & src)175 static inline __m128i SkPacked16ToR32_SSE2(const __m128i& src) {
176     __m128i r = _mm_srli_epi32(src, SK_R16_SHIFT);
177     r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK));
178     r = _mm_or_si128(_mm_slli_epi32(r, (8 - SK_R16_BITS)),
179                      _mm_srli_epi32(r, (2 * SK_R16_BITS - 8)));
180 
181     return r;
182 }
183 
SkPacked16ToG32_SSE2(const __m128i & src)184 static inline __m128i SkPacked16ToG32_SSE2(const __m128i& src) {
185     __m128i g = _mm_srli_epi32(src, SK_G16_SHIFT);
186     g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK));
187     g = _mm_or_si128(_mm_slli_epi32(g, (8 - SK_G16_BITS)),
188                      _mm_srli_epi32(g, (2 * SK_G16_BITS - 8)));
189 
190     return g;
191 }
192 
SkPacked16ToB32_SSE2(const __m128i & src)193 static inline __m128i SkPacked16ToB32_SSE2(const __m128i& src) {
194     __m128i b = _mm_srli_epi32(src, SK_B16_SHIFT);
195     b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK));
196     b = _mm_or_si128(_mm_slli_epi32(b, (8 - SK_B16_BITS)),
197                      _mm_srli_epi32(b, (2 * SK_B16_BITS - 8)));
198 
199     return b;
200 }
201 
SkPixel16ToPixel32_SSE2(const __m128i & src)202 static inline __m128i SkPixel16ToPixel32_SSE2(const __m128i& src) {
203     __m128i r = SkPacked16ToR32_SSE2(src);
204     __m128i g = SkPacked16ToG32_SSE2(src);
205     __m128i b = SkPacked16ToB32_SSE2(src);
206 
207     return SkPackARGB32_SSE2(_mm_set1_epi32(0xFF), r, g, b);
208 }
209 
SkPixel32ToPixel16_ToU16_SSE2(const __m128i & src_pixel1,const __m128i & src_pixel2)210 static inline __m128i SkPixel32ToPixel16_ToU16_SSE2(const __m128i& src_pixel1,
211                                                     const __m128i& src_pixel2) {
212     // Calculate result r.
213     __m128i r1 = _mm_srli_epi32(src_pixel1,
214                                 SK_R32_SHIFT + (8 - SK_R16_BITS));
215     r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK));
216     __m128i r2 = _mm_srli_epi32(src_pixel2,
217                                 SK_R32_SHIFT + (8 - SK_R16_BITS));
218     r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK));
219     __m128i r = _mm_packs_epi32(r1, r2);
220 
221     // Calculate result g.
222     __m128i g1 = _mm_srli_epi32(src_pixel1,
223                                 SK_G32_SHIFT + (8 - SK_G16_BITS));
224     g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK));
225     __m128i g2 = _mm_srli_epi32(src_pixel2,
226                                 SK_G32_SHIFT + (8 - SK_G16_BITS));
227     g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK));
228     __m128i g = _mm_packs_epi32(g1, g2);
229 
230     // Calculate result b.
231     __m128i b1 = _mm_srli_epi32(src_pixel1,
232                                 SK_B32_SHIFT + (8 - SK_B16_BITS));
233     b1 = _mm_and_si128(b1, _mm_set1_epi32(SK_B16_MASK));
234     __m128i b2 = _mm_srli_epi32(src_pixel2,
235                                 SK_B32_SHIFT + (8 - SK_B16_BITS));
236     b2 = _mm_and_si128(b2, _mm_set1_epi32(SK_B16_MASK));
237     __m128i b = _mm_packs_epi32(b1, b2);
238 
239     // Store 8 16-bit colors in dst.
240     __m128i d_pixel = SkPackRGB16_SSE2(r, g, b);
241 
242     return d_pixel;
243 }
244 
245 // Portable version is SkPMSrcOver in SkColorPriv.h.
SkPMSrcOver_SSE2(const __m128i & src,const __m128i & dst)246 static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
247     return _mm_add_epi32(src,
248                          SkAlphaMulQ_SSE2(dst, _mm_sub_epi32(_mm_set1_epi32(256),
249                                                              SkGetPackedA32_SSE2(src))));
250 }
251 
// Fast path for SkBlendARGB32 with a constant alpha factor.
// Blends src over dst: src * src_scale + dst * dst_scale, where
// src_scale = aa + 1 and dst_scale is derived per pixel from src's alpha.
static inline __m128i SkBlendARGB32_SSE2(const __m128i& src, const __m128i& dst,
                                         const unsigned aa) {
    unsigned alpha = SkAlpha255To256(aa);
#ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
     __m128i src_scale = _mm_set1_epi32(alpha);
     // SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale))
     __m128i dst_scale = SkGetPackedA32_SSE2(src);
     dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
     dst_scale = _mm_srli_epi16(dst_scale, 8);
     dst_scale = _mm_sub_epi32(_mm_set1_epi32(256), dst_scale);

     // Scale src and dst independently, then combine with a byte-wise add.
     __m128i result = SkAlphaMulQ_SSE2(src, alpha);
     return _mm_add_epi8(result, SkAlphaMulQ_SSE2(dst, dst_scale));
#else
    __m128i src_scale = _mm_set1_epi16(alpha);
    // SkAlphaMulInv256(SkGetPackedA32(src), src_scale)
    __m128i dst_scale = SkGetPackedA32_SSE2(src);
    // High words in dst_scale are 0, so it's safe to multiply with 16-bit src_scale.
    dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
    // Rounded complement: (0xFFFF - x + ((0xFFFF - x) >> 8)) >> 8.
    dst_scale = _mm_sub_epi32(_mm_set1_epi32(0xFFFF), dst_scale);
    dst_scale = _mm_add_epi32(dst_scale, _mm_srli_epi32(dst_scale, 8));
    dst_scale = _mm_srli_epi32(dst_scale, 8);
    // Duplicate scales into 2x16-bit pattern per pixel.
    dst_scale = _mm_shufflelo_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
    dst_scale = _mm_shufflehi_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));

    const __m128i mask = _mm_set1_epi32(0x00FF00FF);

    // Unpack the 16x8-bit source/destination into 2 8x16-bit splayed halves.
    __m128i src_rb = _mm_and_si128(mask, src);
    __m128i src_ag = _mm_srli_epi16(src, 8);
    __m128i dst_rb = _mm_and_si128(mask, dst);
    __m128i dst_ag = _mm_srli_epi16(dst, 8);

    // Scale them.
    src_rb = _mm_mullo_epi16(src_rb, src_scale);
    src_ag = _mm_mullo_epi16(src_ag, src_scale);
    dst_rb = _mm_mullo_epi16(dst_rb, dst_scale);
    dst_ag = _mm_mullo_epi16(dst_ag, dst_scale);

    // Add the scaled source and destination.
    dst_rb = _mm_add_epi16(src_rb, dst_rb);
    dst_ag = _mm_add_epi16(src_ag, dst_ag);

    // Unsplay the halves back together (dividing each channel by 256).
    dst_rb = _mm_srli_epi16(dst_rb, 8);
    dst_ag = _mm_andnot_si128(mask, dst_ag);
    return _mm_or_si128(dst_rb, dst_ag);
#endif
}
303 
304 #undef ASSERT_EQ
305 #endif // SkColor_opts_SSE2_DEFINED
306