/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <tmmintrin.h>

#include "./aom_dsp_rtcd.h"
// -----------------------------------------------------------------------------
/*
; ------------------------------------------
; input: x, y, z, result
;
; trick from pascal
; (x+2y+z+2)>>2 can be calculated as:
; result = avg(x,z)
; result -= xor(x,z) & 1
; result = avg(result,y)
; ------------------------------------------
*/
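// A minimal scalar sketch of the identity above, assuming 16-bit unsigned
// inputs (avg3_u16 is an illustrative name only; it is not used by the SIMD
// code below). _mm_avg_epu16 rounds up, so subtracting (x ^ z) & 1 first
// restores the truncating average before the second rounding average:
static INLINE uint16_t avg3_u16(uint16_t x, uint16_t y, uint16_t z) {
  const uint16_t a = (uint16_t)((x + z + 1) >> 1);   // avg(x, z), rounds up
  const uint16_t b = (uint16_t)(a - ((x ^ z) & 1));  // exactly (x + z) >> 1
  return (uint16_t)((b + y + 1) >> 1);  // == (x + 2 * y + z + 2) >> 2
}
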
static INLINE __m128i avg3_epu16(const __m128i *x, const __m128i *y,
                                 const __m128i *z) {
  const __m128i one = _mm_set1_epi16(1);
  const __m128i a = _mm_avg_epu16(*x, *z);
  const __m128i b =
      _mm_subs_epu16(a, _mm_and_si128(_mm_xor_si128(*x, *z), one));
  return _mm_avg_epu16(b, *y);
}

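// Shuffle mask that rotates the eight 16-bit lanes of a vector right by one
// lane: result lane i takes source lane i + 1, and lane 7 wraps around to
// take lane 0.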
DECLARE_ALIGNED(16, static const uint8_t, rotate_right_epu16[16]) = {
  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1
};

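// Rotates *a right by one 16-bit lane in place and returns the rotated
// vector, so successive calls hand the predictors below one new left-column
// value per step.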
static INLINE __m128i rotr_epu16(__m128i *a, const __m128i *rotrw) {
  *a = _mm_shuffle_epi8(*a, *rotrw);
  return *a;
}

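// D117 prediction: values propagate down two rows and right one column per
// step. Even output rows are the two-tap average of the above edge, odd rows
// the three-tap average; each loop iteration stores a row pair, then shifts
// both rows right by one lane, pulling the incoming lane from the rotated
// avg3_left vector.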
void aom_highbd_d117_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i IXABCDEF =
      _mm_alignr_epi8(XABCDEFG, _mm_slli_si128(IJKLMNOP, 14), 14);
  const __m128i avg3 = avg3_epu16(&ABCDEFGH, &XABCDEFG, &IXABCDEF);
  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, XABCDEFG);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i JKLMNOP0 = _mm_srli_si128(IJKLMNOP, 2);
  __m128i avg3_left = avg3_epu16(&XIJKLMNO, &IJKLMNOP, &JKLMNOP0);
  __m128i rowa = avg2;
  __m128i rowb = avg3;
  int i;
  (void)bd;
  for (i = 0; i < 8; i += 2) {
    _mm_store_si128((__m128i *)dst, rowa);
    dst += stride;
    _mm_store_si128((__m128i *)dst, rowb);
    dst += stride;
    rowa = _mm_alignr_epi8(rowa, rotr_epu16(&avg3_left, &rotrw), 14);
    rowb = _mm_alignr_epi8(rowb, rotr_epu16(&avg3_left, &rotrw), 14);
  }
}

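// 16x16: the same scheme widened to two vectors per row; rowa_1/rowb_1 take
// their incoming lane from rowa_0/rowb_0, and only the _0 vectors consume
// left-column values.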
void aom_highbd_d117_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i B0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i B1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i C0 = _mm_alignr_epi8(B0, _mm_slli_si128(L0, 14), 14);
  const __m128i C1 = _mm_alignr_epi8(B1, B0, 14);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(B0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i L0_ = _mm_alignr_epi8(L1, L0, 2);
  const __m128i L1_ = _mm_srli_si128(L1, 2);
  __m128i rowa_0 = avg2_0;
  __m128i rowa_1 = avg2_1;
  __m128i rowb_0 = avg3_0;
  __m128i rowb_1 = avg3_1;
  __m128i avg3_left[2];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&XL0, &L0, &L0_);
  avg3_left[1] = avg3_epu16(&XL1, &L1, &L1_);
  for (i = 0; i < 2; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; j += 2) {
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      dst += stride;
      _mm_store_si128((__m128i *)dst, rowb_0);
      _mm_store_si128((__m128i *)(dst + 8), rowb_1);
      dst += stride;
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      rowb_1 = _mm_alignr_epi8(rowb_1, rowb_0, 14);
      rowb_0 = _mm_alignr_epi8(rowb_0, rotr_epu16(&avg_left, &rotrw), 14);
    }
  }
}

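// 32x32: as above, widened to four vectors per row.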
void aom_highbd_d117_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i A2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i A3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i B0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i B1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i B3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i avg2_0 = _mm_avg_epu16(A0, B0);
  const __m128i avg2_1 = _mm_avg_epu16(A1, B1);
  const __m128i avg2_2 = _mm_avg_epu16(A2, B2);
  const __m128i avg2_3 = _mm_avg_epu16(A3, B3);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i C0 = _mm_alignr_epi8(B0, _mm_slli_si128(L0, 14), 14);
  const __m128i C1 = _mm_alignr_epi8(B1, B0, 14);
  const __m128i C2 = _mm_alignr_epi8(B2, B1, 14);
  const __m128i C3 = _mm_alignr_epi8(B3, B2, 14);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(B0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i L0_ = _mm_alignr_epi8(L1, L0, 2);
  const __m128i L1_ = _mm_alignr_epi8(L2, L1, 2);
  const __m128i L2_ = _mm_alignr_epi8(L3, L2, 2);
  const __m128i L3_ = _mm_srli_si128(L3, 2);
  __m128i rowa_0 = avg2_0;
  __m128i rowa_1 = avg2_1;
  __m128i rowa_2 = avg2_2;
  __m128i rowa_3 = avg2_3;
  __m128i rowb_0 = avg3_0;
  __m128i rowb_1 = avg3_1;
  __m128i rowb_2 = avg3_2;
  __m128i rowb_3 = avg3_3;
  __m128i avg3_left[4];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&XL0, &L0, &L0_);
  avg3_left[1] = avg3_epu16(&XL1, &L1, &L1_);
  avg3_left[2] = avg3_epu16(&XL2, &L2, &L2_);
  avg3_left[3] = avg3_epu16(&XL3, &L3, &L3_);
  for (i = 0; i < 4; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; j += 2) {
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      _mm_store_si128((__m128i *)(dst + 16), rowa_2);
      _mm_store_si128((__m128i *)(dst + 24), rowa_3);
      dst += stride;
      _mm_store_si128((__m128i *)dst, rowb_0);
      _mm_store_si128((__m128i *)(dst + 8), rowb_1);
      _mm_store_si128((__m128i *)(dst + 16), rowb_2);
      _mm_store_si128((__m128i *)(dst + 24), rowb_3);
      dst += stride;
      rowa_3 = _mm_alignr_epi8(rowa_3, rowa_2, 14);
      rowa_2 = _mm_alignr_epi8(rowa_2, rowa_1, 14);
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      rowb_3 = _mm_alignr_epi8(rowb_3, rowb_2, 14);
      rowb_2 = _mm_alignr_epi8(rowb_2, rowb_1, 14);
      rowb_1 = _mm_alignr_epi8(rowb_1, rowb_0, 14);
      rowb_0 = _mm_alignr_epi8(rowb_0, rotr_epu16(&avg_left, &rotrw), 14);
    }
  }
}

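// D135 prediction: the 45-degree down-right diagonal. Each output row is the
// previous row shifted right by one lane, with the incoming leftmost lane
// supplied by the three-tap average of the left edge via rotr_epu16.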
void aom_highbd_d135_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i AXIJKLMN =
      _mm_alignr_epi8(XIJKLMNO, _mm_slli_si128(ABCDEFGH, 14), 14);
  const __m128i avg3 = avg3_epu16(&XABCDEFG, &ABCDEFGH, &BCDEFGH0);
  __m128i avg3_left = avg3_epu16(&IJKLMNOP, &XIJKLMNO, &AXIJKLMN);
  __m128i rowa = avg3;
  int i;
  (void)bd;
  for (i = 0; i < 8; ++i) {
    rowa = _mm_alignr_epi8(rowa, rotr_epu16(&avg3_left, &rotrw), 14);
    _mm_store_si128((__m128i *)dst, rowa);
    dst += stride;
  }
}

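// 16x16: two vectors per row; rowa_1 takes its incoming lane from rowa_0.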
void aom_highbd_d135_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i B0 = _mm_load_si128((const __m128i *)above);
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i C0 = _mm_alignr_epi8(B1, B0, 2);
  const __m128i C1 = _mm_srli_si128(B1, 2);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i L0_ = _mm_alignr_epi8(XL0, _mm_slli_si128(B0, 14), 14);
  const __m128i L1_ = _mm_alignr_epi8(XL1, XL0, 14);
  __m128i rowa_0 = avg3_0;
  __m128i rowa_1 = avg3_1;
  __m128i avg3_left[2];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&L0, &XL0, &L0_);
  avg3_left[1] = avg3_epu16(&L1, &XL1, &L1_);
  for (i = 0; i < 2; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; ++j) {
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      dst += stride;
    }
  }
}

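// 32x32: four vectors per row.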
void aom_highbd_d135_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i rotrw = _mm_load_si128((const __m128i *)rotate_right_epu16);
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i A3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i B0 = _mm_load_si128((const __m128i *)above);
  const __m128i B1 = _mm_load_si128((const __m128i *)(above + 8));
  const __m128i B2 = _mm_load_si128((const __m128i *)(above + 16));
  const __m128i B3 = _mm_load_si128((const __m128i *)(above + 24));
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i C0 = _mm_alignr_epi8(B1, B0, 2);
  const __m128i C1 = _mm_alignr_epi8(B2, B1, 2);
  const __m128i C2 = _mm_alignr_epi8(B3, B2, 2);
  const __m128i C3 = _mm_srli_si128(B3, 2);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i L0_ = _mm_alignr_epi8(XL0, _mm_slli_si128(B0, 14), 14);
  const __m128i L1_ = _mm_alignr_epi8(XL1, XL0, 14);
  const __m128i L2_ = _mm_alignr_epi8(XL2, XL1, 14);
  const __m128i L3_ = _mm_alignr_epi8(XL3, XL2, 14);
  __m128i rowa_0 = avg3_0;
  __m128i rowa_1 = avg3_1;
  __m128i rowa_2 = avg3_2;
  __m128i rowa_3 = avg3_3;
  __m128i avg3_left[4];
  int i, j;
  (void)bd;
  avg3_left[0] = avg3_epu16(&L0, &XL0, &L0_);
  avg3_left[1] = avg3_epu16(&L1, &XL1, &L1_);
  avg3_left[2] = avg3_epu16(&L2, &XL2, &L2_);
  avg3_left[3] = avg3_epu16(&L3, &XL3, &L3_);
  for (i = 0; i < 4; ++i) {
    __m128i avg_left = avg3_left[i];
    for (j = 0; j < 8; ++j) {
      rowa_3 = _mm_alignr_epi8(rowa_3, rowa_2, 14);
      rowa_2 = _mm_alignr_epi8(rowa_2, rowa_1, 14);
      rowa_1 = _mm_alignr_epi8(rowa_1, rowa_0, 14);
      rowa_0 = _mm_alignr_epi8(rowa_0, rotr_epu16(&avg_left, &rotrw), 14);
      _mm_store_si128((__m128i *)dst, rowa_0);
      _mm_store_si128((__m128i *)(dst + 8), rowa_1);
      _mm_store_si128((__m128i *)(dst + 16), rowa_2);
      _mm_store_si128((__m128i *)(dst + 24), rowa_3);
      dst += stride;
    }
  }
}

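// D153 prediction: values propagate down one row and right two columns per
// step, so each output row is the previous row shifted right by two lanes.
// The two incoming lanes per row are one two-tap and one three-tap average of
// the left edge, which is why avg2_left and avg3_left are interleaved with
// unpacklo/unpackhi before being shifted in.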
void aom_highbd_d153_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
                                         const uint16_t *above,
                                         const uint16_t *left, int bd) {
  const __m128i XABCDEFG = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i ABCDEFG0 = _mm_srli_si128(XABCDEFG, 2);
  const __m128i BCDEFG00 = _mm_srli_si128(XABCDEFG, 4);
  const __m128i avg3 = avg3_epu16(&BCDEFG00, &ABCDEFG0, &XABCDEFG);
  const __m128i IJKLMNOP = _mm_load_si128((const __m128i *)left);
  const __m128i XIJKLMNO =
      _mm_alignr_epi8(IJKLMNOP, _mm_slli_si128(XABCDEFG, 14), 14);
  const __m128i AXIJKLMN =
      _mm_alignr_epi8(XIJKLMNO, _mm_slli_si128(XABCDEFG, 12), 14);
  const __m128i avg3_left = avg3_epu16(&IJKLMNOP, &XIJKLMNO, &AXIJKLMN);
  const __m128i avg2_left = _mm_avg_epu16(IJKLMNOP, XIJKLMNO);
  const __m128i avg2_avg3_lo = _mm_unpacklo_epi16(avg2_left, avg3_left);
  const __m128i avg2_avg3_hi = _mm_unpackhi_epi16(avg2_left, avg3_left);
  const __m128i row0 =
      _mm_alignr_epi8(avg3, _mm_slli_si128(avg2_avg3_lo, 12), 12);
  const __m128i row1 =
      _mm_alignr_epi8(row0, _mm_slli_si128(avg2_avg3_lo, 8), 12);
  const __m128i row2 =
      _mm_alignr_epi8(row1, _mm_slli_si128(avg2_avg3_lo, 4), 12);
  const __m128i row3 = _mm_alignr_epi8(row2, avg2_avg3_lo, 12);
  const __m128i row4 =
      _mm_alignr_epi8(row3, _mm_slli_si128(avg2_avg3_hi, 12), 12);
  const __m128i row5 =
      _mm_alignr_epi8(row4, _mm_slli_si128(avg2_avg3_hi, 8), 12);
  const __m128i row6 =
      _mm_alignr_epi8(row5, _mm_slli_si128(avg2_avg3_hi, 4), 12);
  const __m128i row7 = _mm_alignr_epi8(row6, avg2_avg3_hi, 12);
  (void)bd;
  _mm_store_si128((__m128i *)dst, row0);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row1);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row2);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row3);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row4);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row5);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row6);
  dst += stride;
  _mm_store_si128((__m128i *)dst, row7);
}

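// 16x16: two vectors per row; each stored row shifts right by two lanes and
// consumes one interleaved (avg2, avg3) pair from avg2_avg3_left.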
void aom_highbd_d153_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_srli_si128(A1, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_srli_si128(A1, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i AXL0 = _mm_alignr_epi8(XL0, _mm_slli_si128(A0, 12), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i AXL1 = _mm_alignr_epi8(L1, L0, 12);
  const __m128i avg3_left_0 = avg3_epu16(&L0, &XL0, &AXL0);
  const __m128i avg2_left_0 = _mm_avg_epu16(L0, XL0);
  const __m128i avg3_left_1 = avg3_epu16(&L1, &XL1, &AXL1);
  const __m128i avg2_left_1 = _mm_avg_epu16(L1, XL1);
  __m128i row_0 = avg3_0;
  __m128i row_1 = avg3_1;
  __m128i avg2_avg3_left[2][2];
  int i, j;
  (void)bd;

  avg2_avg3_left[0][0] = _mm_unpacklo_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[0][1] = _mm_unpackhi_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[1][0] = _mm_unpacklo_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[1][1] = _mm_unpackhi_epi16(avg2_left_1, avg3_left_1);

  for (j = 0; j < 2; ++j) {
    for (i = 0; i < 2; ++i) {
      const __m128i avg2_avg3 = avg2_avg3_left[j][i];
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 12), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 8), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 4), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, avg2_avg3, 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      dst += stride;
    }
  }
}

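// 32x32: four vectors per row.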
void aom_highbd_d153_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd) {
  const __m128i A0 = _mm_loadu_si128((const __m128i *)(above - 1));
  const __m128i A1 = _mm_loadu_si128((const __m128i *)(above + 7));
  const __m128i A2 = _mm_loadu_si128((const __m128i *)(above + 15));
  const __m128i A3 = _mm_loadu_si128((const __m128i *)(above + 23));
  const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
  const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
  const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
  const __m128i B3 = _mm_srli_si128(A3, 2);
  const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
  const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
  const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
  const __m128i C3 = _mm_srli_si128(A3, 4);
  const __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
  const __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
  const __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
  const __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
  const __m128i L0 = _mm_load_si128((const __m128i *)left);
  const __m128i L1 = _mm_load_si128((const __m128i *)(left + 8));
  const __m128i L2 = _mm_load_si128((const __m128i *)(left + 16));
  const __m128i L3 = _mm_load_si128((const __m128i *)(left + 24));
  const __m128i XL0 = _mm_alignr_epi8(L0, _mm_slli_si128(A0, 14), 14);
  const __m128i XL1 = _mm_alignr_epi8(L1, L0, 14);
  const __m128i XL2 = _mm_alignr_epi8(L2, L1, 14);
  const __m128i XL3 = _mm_alignr_epi8(L3, L2, 14);
  const __m128i AXL0 = _mm_alignr_epi8(XL0, _mm_slli_si128(A0, 12), 14);
  const __m128i AXL1 = _mm_alignr_epi8(L1, L0, 12);
  const __m128i AXL2 = _mm_alignr_epi8(L2, L1, 12);
  const __m128i AXL3 = _mm_alignr_epi8(L3, L2, 12);
  const __m128i avg3_left_0 = avg3_epu16(&L0, &XL0, &AXL0);
  const __m128i avg3_left_1 = avg3_epu16(&L1, &XL1, &AXL1);
  const __m128i avg3_left_2 = avg3_epu16(&L2, &XL2, &AXL2);
  const __m128i avg3_left_3 = avg3_epu16(&L3, &XL3, &AXL3);
  const __m128i avg2_left_0 = _mm_avg_epu16(L0, XL0);
  const __m128i avg2_left_1 = _mm_avg_epu16(L1, XL1);
  const __m128i avg2_left_2 = _mm_avg_epu16(L2, XL2);
  const __m128i avg2_left_3 = _mm_avg_epu16(L3, XL3);
  __m128i row_0 = avg3_0;
  __m128i row_1 = avg3_1;
  __m128i row_2 = avg3_2;
  __m128i row_3 = avg3_3;
  __m128i avg2_avg3_left[4][2];
  int i, j;
  (void)bd;

  avg2_avg3_left[0][0] = _mm_unpacklo_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[0][1] = _mm_unpackhi_epi16(avg2_left_0, avg3_left_0);
  avg2_avg3_left[1][0] = _mm_unpacklo_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[1][1] = _mm_unpackhi_epi16(avg2_left_1, avg3_left_1);
  avg2_avg3_left[2][0] = _mm_unpacklo_epi16(avg2_left_2, avg3_left_2);
  avg2_avg3_left[2][1] = _mm_unpackhi_epi16(avg2_left_2, avg3_left_2);
  avg2_avg3_left[3][0] = _mm_unpacklo_epi16(avg2_left_3, avg3_left_3);
  avg2_avg3_left[3][1] = _mm_unpackhi_epi16(avg2_left_3, avg3_left_3);

  for (j = 0; j < 4; ++j) {
    for (i = 0; i < 2; ++i) {
      const __m128i avg2_avg3 = avg2_avg3_left[j][i];
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 12), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 8), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, _mm_slli_si128(avg2_avg3, 4), 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
      row_3 = _mm_alignr_epi8(row_3, row_2, 12);
      row_2 = _mm_alignr_epi8(row_2, row_1, 12);
      row_1 = _mm_alignr_epi8(row_1, row_0, 12);
      row_0 = _mm_alignr_epi8(row_0, avg2_avg3, 12);
      _mm_store_si128((__m128i *)dst, row_0);
      _mm_store_si128((__m128i *)(dst + 8), row_1);
      _mm_store_si128((__m128i *)(dst + 16), row_2);
      _mm_store_si128((__m128i *)(dst + 24), row_3);
      dst += stride;
    }
  }
}