/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <smmintrin.h> /* SSE4.1 */

#include "./vp8_rtcd.h"
#include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */
#include "vp8/encoder/block.h"

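/* SELECT_EOB quantizes one coefficient in zig-zag order. It compares the
 * precomputed x - (zbin[] + extra) for lane z against the current zero-run
 * boost. If the coefficient stays inside the boosted zero bin, or quantizes
 * to zero anyway, the boost pointer advances through b->zrun_zbin_boost and
 * the lane is left zero. Otherwise the quantized value is inserted into q,
 * eob is updated to the current scan position, and the boost run restarts
 * at the beginning of the table. The do/while (0) wrapper exists so the
 * early-out can be written as a break. */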
#define SELECT_EOB(i, z, x, y, q)                         \
  do {                                                    \
    short boost = *zbin_boost_ptr;                        \
    /* Technically _mm_extract_epi16() returns an int: */ \
    /* https://bugs.llvm.org/show_bug.cgi?id=41657 */     \
    short x_z = (short)_mm_extract_epi16(x, z);           \
    short y_z = (short)_mm_extract_epi16(y, z);           \
    int cmp = (x_z < boost) | (y_z == 0);                 \
    zbin_boost_ptr++;                                     \
    if (cmp) break;                                       \
    q = _mm_insert_epi16(q, y_z, z);                      \
    eob = i;                                              \
    zbin_boost_ptr = b->zrun_zbin_boost;                  \
  } while (0)

void vp8_regular_quantize_b_sse4_1(BLOCK *b, BLOCKD *d) {
  char eob = 0;
  short *zbin_boost_ptr = b->zrun_zbin_boost;

  __m128i x0, x1, y0, y1, x_minus_zbin0, x_minus_zbin1, dqcoeff0, dqcoeff1;
  __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));
  __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));
  __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
  __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
  __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra);
  __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));
  __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));
  __m128i round0 = _mm_load_si128((__m128i *)(b->round));
  __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
  __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));
  __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));
  __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
  __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
  __m128i qcoeff0 = _mm_setzero_si128();
  __m128i qcoeff1 = _mm_setzero_si128();

  /* Duplicate to all lanes. */
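  /* _mm_shufflelo_epi16 with an imm8 of 0 broadcasts word 0 across the low
   * four lanes; unpacking the result with itself then fills all eight. */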
  zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0);
  zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra);

  /* x = abs(z) */
  x0 = _mm_abs_epi16(z0);
  x1 = _mm_abs_epi16(z1);

  /* zbin[] + zbin_extra */
  zbin0 = _mm_add_epi16(zbin0, zbin_extra);
  zbin1 = _mm_add_epi16(zbin1, zbin_extra);

  /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance
   * the equation because boost is the only value which can change:
   * x - (zbin[] + extra) >= boost */
  x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);
  x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);
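  /* x_minus_zbin{0,1} now hold the left-hand side of that inequality;
   * SELECT_EOB extracts one lane at a time and compares it to the current
   * zero-run boost. */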

  /* All the remaining calculations are valid whether they are done now with
   * simd or later inside the loop one at a time. */
  x0 = _mm_add_epi16(x0, round0);
  x1 = _mm_add_epi16(x1, round1);

  y0 = _mm_mulhi_epi16(x0, quant0);
  y1 = _mm_mulhi_epi16(x1, quant1);

  y0 = _mm_add_epi16(y0, x0);
  y1 = _mm_add_epi16(y1, x1);
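  /* y now holds ((x * quant) >> 16) + x: the rounded, scaled magnitude
   * before the final shift. */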

  /* Instead of shifting each value independently we convert the scaling
   * factor with 1 << (16 - shift) so we can use multiply/return high half. */
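  /* For example, a shift of 1 is stored as 1 << 15; the high half of the
   * 32-bit product is then y >> 1. */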
  y0 = _mm_mulhi_epi16(y0, quant_shift0);
  y1 = _mm_mulhi_epi16(y1, quant_shift1);

  /* Restore the sign. */
  y0 = _mm_sign_epi16(y0, z0);
  y1 = _mm_sign_epi16(y1, z1);

  /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */
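  /* The (i, lane) arguments below walk the 4x4 zig-zag scan:
   * 0 1 4 8 5 2 3 6 9 12 13 10 7 11 14 15. */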
  SELECT_EOB(1, 0, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(2, 1, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(3, 4, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(4, 0, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(5, 5, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(6, 2, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(7, 3, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(8, 6, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(9, 1, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(10, 4, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(11, 5, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(12, 2, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(13, 7, x_minus_zbin0, y0, qcoeff0);
  SELECT_EOB(14, 3, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(15, 6, x_minus_zbin1, y1, qcoeff1);
  SELECT_EOB(16, 7, x_minus_zbin1, y1, qcoeff1);

  _mm_store_si128((__m128i *)(d->qcoeff), qcoeff0);
  _mm_store_si128((__m128i *)(d->qcoeff + 8), qcoeff1);

  dqcoeff0 = _mm_mullo_epi16(qcoeff0, dequant0);
  dqcoeff1 = _mm_mullo_epi16(qcoeff1, dequant1);

  _mm_store_si128((__m128i *)(d->dqcoeff), dqcoeff0);
  _mm_store_si128((__m128i *)(d->dqcoeff + 8), dqcoeff1);

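  /* eob is 1 + the scan position of the last kept coefficient, or 0 if the
   * block quantized to all zeros. */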
  *d->eob = eob;
}