// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 Rescaling functions
//
// Author: Skal (pascal.massimino@gmail.com)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2) && !defined(WEBP_REDUCE_SIZE)
#include <emmintrin.h>

#include <assert.h>
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"

//------------------------------------------------------------------------------
// Implementations of critical functions ImportRow / ExportRow

#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
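// MULT_FIX(x, y) is the fixed-point product x * y / WEBP_RESCALER_ONE rounded
// to nearest; MULT_FIX_FLOOR is the same product rounded down. For example,
// assuming WEBP_RESCALER_RFIX is 32 (see rescaler_utils.h), a weight of
// WEBP_RESCALER_ONE / 2 halves the input: MULT_FIX(9, 1u << 31) == 5 while
// MULT_FIX_FLOOR(9, 1u << 31) == 4.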

// input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
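// The interleaving pairs each 16-bit sample with the sample four bytes ahead,
// so one _mm_madd_epi16 can blend two RGBA pixels channel by channel.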
static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  const __m128i B = _mm_unpacklo_epi8(A, zero);              // A0B0C0D0E0F0G0H0
  const __m128i C = _mm_srli_si128(B, 8);                    // E0F0G0H0
  *out = _mm_unpacklo_epi16(B, C);
}

// input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0
static void LoadEightPixels_SSE2(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  *out = _mm_unpacklo_epi8(A, zero);
}

static void RescalerImportRowExpand_SSE2(WebPRescaler* const wrk,
                                         const uint8_t* src) {
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
  const int x_add = wrk->x_add;
  int accum = x_add;
  __m128i cur_pixels;

  // The SSE2 code relies on _mm_madd_epi16, i.e. 16-bit signed arithmetic,
  // so the interpolation weights (at most x_add) must fit in 15 bits.
  if (wrk->src_width < 8 || accum >= (1 << 15)) {
    WebPRescalerImportRowExpand_C(wrk, src);
    return;
  }

  assert(!WebPRescalerInputDone(wrk));
  assert(wrk->x_expand);
  if (wrk->num_channels == 4) {
    LoadTwoPixels_SSE2(src, &cur_pixels);
    src += 4;
    while (1) {
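      // 'mult' packs the weight pair in each 32-bit lane (accum in the low
      // 16 bits, x_add - accum in the high ones), so the madd below yields
      // cur * accum + next * (x_add - accum) for each of the four channels.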
      const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      _mm_storeu_si128((__m128i*)frow, out);
      frow += 4;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        LoadTwoPixels_SSE2(src, &cur_pixels);
        src += 4;
        accum += x_add;
      }
    }
  } else {
    int left;
    const uint8_t* const src_limit = src + wrk->src_width - 8;
    LoadEightPixels_SSE2(src, &cur_pixels);
    src += 7;
    left = 7;
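    // Only seven pixel pairs are consumed per 8-pixel load: the eighth byte
    // is re-read as the first element of the next load, so 'src' advances by
    // 7 and 'left' counts the pairs remaining in 'cur_pixels'.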
    while (1) {
      const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      assert(sizeof(*frow) == sizeof(uint32_t));
      WebPUint32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out));
      frow += 1;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        if (--left) {
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
        } else if (src <= src_limit) {
          LoadEightPixels_SSE2(src, &cur_pixels);
          src += 7;
          left = 7;
        } else {   // tail
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
          cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1);
          src += 1;
          left = 1;
        }
        accum += x_add;
      }
    }
  }
  assert(accum == 0);
}

static void RescalerImportRowShrink_SSE2(WebPRescaler* const wrk,
                                         const uint8_t* src) {
  const int x_sub = wrk->x_sub;
  int accum = 0;
  const __m128i zero = _mm_setzero_si128();
  const __m128i mult0 = _mm_set1_epi16(x_sub);
  const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale);
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  __m128i sum = zero;
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width;

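  // The vector path handles 4-channel data only and accumulates in 16-bit
  // lanes, which caps the reduction ratio at 1/128 (see the overflow note in
  // the loop below); anything else falls back to the C implementation.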
  if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) {
    WebPRescalerImportRowShrink_C(wrk, src);
    return;
  }
  assert(!WebPRescalerInputDone(wrk));
  assert(!wrk->x_expand);

  for (; frow < frow_end; frow += 4) {
    __m128i base = zero;
    accum += wrk->x_add;
    while (accum > 0) {
      const __m128i A = _mm_cvtsi32_si128(WebPMemToUint32(src));
      src += 4;
      base = _mm_unpacklo_epi8(A, zero);
      // To avoid overflow, we need: base * x_add / x_sub < 32768
      // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit.
      sum = _mm_add_epi16(sum, base);
      accum -= x_sub;
    }
    {    // Emit next horizontal pixel.
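      // 'accum' is now <= 0: the last pixel read overshot by -accum, so its
      // fraction frac = base * -accum is removed from this output
      // (frow = sum * x_sub - frac) and carried over to the next one as
      // sum = MULT_FIX(frac, fx_scale).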
      const __m128i mult = _mm_set1_epi16(-accum);
      const __m128i frac0 = _mm_mullo_epi16(base, mult);  // 16b x 16b -> 32b
      const __m128i frac1 = _mm_mulhi_epu16(base, mult);
      const __m128i frac = _mm_unpacklo_epi16(frac0, frac1);  // frac is 32b
      const __m128i A0 = _mm_mullo_epi16(sum, mult0);
      const __m128i A1 = _mm_mulhi_epu16(sum, mult0);
      const __m128i B0 = _mm_unpacklo_epi16(A0, A1);      // sum * x_sub
      const __m128i frow_out = _mm_sub_epi32(B0, frac);   // sum * x_sub - frac
      const __m128i D0 = _mm_srli_epi64(frac, 32);
      const __m128i D1 = _mm_mul_epu32(frac, mult1);      // 32b x 16b -> 64b
      const __m128i D2 = _mm_mul_epu32(D0, mult1);
      const __m128i E1 = _mm_add_epi64(D1, rounder);
      const __m128i E2 = _mm_add_epi64(D2, rounder);
      const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2));
      const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2));
      const __m128i G = _mm_unpacklo_epi32(F1, F2);
      sum = _mm_packs_epi32(G, zero);
      _mm_storeu_si128((__m128i*)frow, frow_out);
    }
  }
  assert(accum == 0);
}

//------------------------------------------------------------------------------
// Row export

// Load eight 32-bit values from 'src', split them into even lanes (out0/out1)
// and odd lanes (out2/out3) and, if 'mult' is not NULL, multiply each value
// by *mult into a 64-bit product.
static WEBP_INLINE void LoadDispatchAndMult_SSE2(const rescaler_t* const src,
                                                 const __m128i* const mult,
                                                 __m128i* const out0,
                                                 __m128i* const out1,
                                                 __m128i* const out2,
                                                 __m128i* const out3) {
  const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0));
  const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4));
  const __m128i A2 = _mm_srli_epi64(A0, 32);
  const __m128i A3 = _mm_srli_epi64(A1, 32);
  if (mult != NULL) {
    *out0 = _mm_mul_epu32(A0, *mult);
    *out1 = _mm_mul_epu32(A1, *mult);
    *out2 = _mm_mul_epu32(A2, *mult);
    *out3 = _mm_mul_epu32(A3, *mult);
  } else {
    *out0 = A0;
    *out1 = A1;
    *out2 = A2;
    *out3 = A3;
  }
}

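// Computes dst[i] = MULT_FIX(A[i], *mult) for the eight values spread over
// A0..A3 and packs the results into eight bytes at 'dst'.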
static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0,
                                        const __m128i* const A1,
                                        const __m128i* const A2,
                                        const __m128i* const A3,
                                        const __m128i* const mult,
                                        uint8_t* const dst) {
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
  const __m128i C0 = _mm_add_epi64(B0, rounder);
  const __m128i C1 = _mm_add_epi64(B1, rounder);
  const __m128i C2 = _mm_add_epi64(B2, rounder);
  const __m128i C3 = _mm_add_epi64(B3, rounder);
  const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);
  const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
#if (WEBP_RESCALER_RFIX < 32)
  const __m128i D2 =
      _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask);
  const __m128i D3 =
      _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask);
#else
  const __m128i D2 = _mm_and_si128(C2, mask);
  const __m128i D3 = _mm_and_si128(C3, mask);
#endif
  const __m128i E0 = _mm_or_si128(D0, D2);
  const __m128i E1 = _mm_or_si128(D1, D3);
  const __m128i F = _mm_packs_epi32(E0, E1);
  const __m128i G = _mm_packus_epi16(F, F);
  _mm_storel_epi64((__m128i*)dst, G);
}

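// Same as ProcessRow_SSE2, but without the rounding step, i.e. it computes
// dst[i] = MULT_FIX_FLOOR(A[i], *mult).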
static WEBP_INLINE void ProcessRow_Floor_SSE2(const __m128i* const A0,
                                              const __m128i* const A1,
                                              const __m128i* const A2,
                                              const __m128i* const A3,
                                              const __m128i* const mult,
                                              uint8_t* const dst) {
  const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
  const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX);
  const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX);
#if (WEBP_RESCALER_RFIX < 32)
  const __m128i D2 =
      _mm_and_si128(_mm_slli_epi64(B2, 32 - WEBP_RESCALER_RFIX), mask);
  const __m128i D3 =
      _mm_and_si128(_mm_slli_epi64(B3, 32 - WEBP_RESCALER_RFIX), mask);
#else
  const __m128i D2 = _mm_and_si128(B2, mask);
  const __m128i D3 = _mm_and_si128(B3, mask);
#endif
  const __m128i E0 = _mm_or_si128(D0, D2);
  const __m128i E1 = _mm_or_si128(D1, D3);
  const __m128i F = _mm_packs_epi32(E0, E1);
  const __m128i G = _mm_packus_epi16(F, F);
  _mm_storel_epi64((__m128i*)dst, G);
}

static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 0, wrk->fy_scale);

  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0);
  assert(wrk->y_expand);
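  // When y_accum == 0 the output row coincides with a source row and 'frow'
  // is emitted directly; otherwise the row is a blend of 'frow' and 'irow'
  // with fixed-point weights A and B (A + B == WEBP_RESCALER_ONE).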
  if (wrk->y_accum == 0) {
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult_SSE2(frow + x_out, NULL, &A0, &A1, &A2, &A3);
      ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t J = frow[x_out];
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  } else {
    const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub);
    const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B);
    const __m128i mA = _mm_set_epi32(0, A, 0, A);
    const __m128i mB = _mm_set_epi32(0, B, 0, B);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult_SSE2(frow + x_out, &mA, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult_SSE2(irow + x_out, &mB, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(A0, B0);
        const __m128i C1 = _mm_add_epi64(A1, B1);
        const __m128i C2 = _mm_add_epi64(A2, B2);
        const __m128i C3 = _mm_add_epi64(A3, B3);
        const __m128i D0 = _mm_add_epi64(C0, rounder);
        const __m128i D1 = _mm_add_epi64(C1, rounder);
        const __m128i D2 = _mm_add_epi64(C2, rounder);
        const __m128i D3 = _mm_add_epi64(C3, rounder);
        const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX);
        const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX);
        const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX);
        const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX);
        ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint64_t I = (uint64_t)A * frow[x_out]
                       + (uint64_t)B * irow[x_out];
      const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX);
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  }
}

static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum);
  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0);
  assert(!wrk->y_expand);
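  // If the next source row overlaps this output row (yscale != 0), the
  // overshoot frac = MULT_FIX(frow, yscale) is subtracted before scaling and
  // written back into 'irow' as the fractional start of the next row;
  // otherwise 'irow' is emitted as-is and cleared.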
  if (yscale) {
    const int scale_xy = wrk->fxy_scale;
    const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy);
    const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult_SSE2(frow + x_out, &mult_y, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(B0, rounder);
        const __m128i C1 = _mm_add_epi64(B1, rounder);
        const __m128i C2 = _mm_add_epi64(B2, rounder);
        const __m128i C3 = _mm_add_epi64(B3, rounder);
        const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);   // = frac
        const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
        const __m128i D2 = _mm_srli_epi64(C2, WEBP_RESCALER_RFIX);
        const __m128i D3 = _mm_srli_epi64(C3, WEBP_RESCALER_RFIX);
        const __m128i E0 = _mm_sub_epi64(A0, D0);   // irow[x] - frac
        const __m128i E1 = _mm_sub_epi64(A1, D1);
        const __m128i E2 = _mm_sub_epi64(A2, D2);
        const __m128i E3 = _mm_sub_epi64(A3, D3);
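        // Re-interleave the even/odd 'frac' lanes (D0..D3) and store them
        // back into 'irow' as the new fractional start.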
        const __m128i F2 = _mm_slli_epi64(D2, 32);
        const __m128i F3 = _mm_slli_epi64(D3, 32);
        const __m128i G0 = _mm_or_si128(D0, F2);
        const __m128i G1 = _mm_or_si128(D1, F3);
        _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
        _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
        ProcessRow_Floor_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t frac = (int)MULT_FIX(frow[x_out], yscale);
      const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = frac;   // new fractional start
    }
  } else {
    const uint32_t scale = wrk->fxy_scale;
    const __m128i mult = _mm_set_epi32(0, scale, 0, scale);
    const __m128i zero = _mm_setzero_si128();
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero);
      _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero);
      ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const int v = (int)MULT_FIX(irow[x_out], scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = 0;
    }
  }
}

#undef MULT_FIX_FLOOR
#undef MULT_FIX
#undef ROUNDER

//------------------------------------------------------------------------------

extern void WebPRescalerDspInitSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) {
  WebPRescalerImportRowExpand = RescalerImportRowExpand_SSE2;
  WebPRescalerImportRowShrink = RescalerImportRowShrink_SSE2;
  WebPRescalerExportRowExpand = RescalerExportRowExpand_SSE2;
  WebPRescalerExportRowShrink = RescalerExportRowShrink_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2)

#endif  // WEBP_USE_SSE2