/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <string.h>
#include <tmmintrin.h>

#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/convolve.h"
#include "aom_dsp/x86/convolve_sse2.h"
#include "aom_dsp/x86/convolve_ssse3.h"
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/transpose_sse2.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/emmintrin_compat.h"

// shuffle masks used only by the 4_h8 convolution
DECLARE_ALIGNED(16, static const uint8_t, filt1_4_h8[16]) = { 0, 1, 1, 2, 2, 3,
                                                              3, 4, 2, 3, 3, 4,
                                                              4, 5, 5, 6 };

DECLARE_ALIGNED(16, static const uint8_t, filt2_4_h8[16]) = { 4, 5, 5, 6, 6, 7,
                                                              7, 8, 6, 7, 7, 8,
                                                              8, 9, 9, 10 };
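// Each mask above feeds _mm_shuffle_epi8 so that one pmaddubsw evaluates two
// taps for four output pixels at once: the low half of filt1_4_h8 gathers the
// overlapping byte pairs for taps (k0,k1) of pixels 0..3 and the high half
// the pairs for taps (k2,k3); filt2_4_h8 does the same for (k4,k5)/(k6,k7).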

// filters for 8_h8 and 16_h8
DECLARE_ALIGNED(16, static const uint8_t,
                filt1_global[16]) = { 0, 1, 1, 2, 2, 3, 3, 4,
                                      4, 5, 5, 6, 6, 7, 7, 8 };

DECLARE_ALIGNED(16, static const uint8_t,
                filt2_global[16]) = { 2, 3, 3, 4, 4, 5, 5, 6,
                                      6, 7, 7, 8, 8, 9, 9, 10 };

DECLARE_ALIGNED(16, static const uint8_t,
                filt3_global[16]) = { 4, 5, 5, 6,  6,  7,  7,  8,
                                      8, 9, 9, 10, 10, 11, 11, 12 };

DECLARE_ALIGNED(16, static const uint8_t,
                filt4_global[16]) = { 6,  7,  7,  8,  8,  9,  9,  10,
                                      10, 11, 11, 12, 12, 13, 13, 14 };
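// Here each filtN_global mask gathers the byte pairs for one tap pair across
// all eight output pixels: filt1 pairs src[x] with src[x + 1] for taps
// (k0,k1), filt2 starts two bytes later for (k2,k3), and so on, so four
// shuffle+pmaddubsw passes cover the full 8-tap kernel.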

DECLARE_ALIGNED(32, static const uint8_t, filt_h4[]) = {
  0,  1,  1,  2,  2, 3,  3,  4,  4,  5,  5,  6,  6,  7,  7,  8,  0,  1,  1,
  2,  2,  3,  3,  4, 4,  5,  5,  6,  6,  7,  7,  8,  2,  3,  3,  4,  4,  5,
  5,  6,  6,  7,  7, 8,  8,  9,  9,  10, 2,  3,  3,  4,  4,  5,  5,  6,  6,
  7,  7,  8,  8,  9, 9,  10, 4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9,  10,
  10, 11, 11, 12, 4, 5,  5,  6,  6,  7,  7,  8,  8,  9,  9,  10, 10, 11, 11,
  12, 6,  7,  7,  8, 8,  9,  9,  10, 10, 11, 11, 12, 12, 13, 13, 14, 6,  7,
  7,  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 13, 13, 14
};

DECLARE_ALIGNED(32, static const uint8_t, filtd4[]) = {
  2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8,
  2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8,
};
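// filt_h4 repeats the four pair masks above with each 16-byte mask stored
// twice, so each mask occupies 32 bytes and the loads below index the table
// at multiples of 32. filtd4 gathers whole 4-byte windows
// (src[x + 2] .. src[x + 5]) for the 4-tap horizontal path, likewise
// duplicated.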

// These are reused by the avx2 intrinsics.
filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3;
filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3;
filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3;

static void aom_filter_block1d4_h4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i addFilterReg32, filt1Reg, firstFilters, srcReg32b1, srcRegFilt32b1_1;
  unsigned int i;
  src_ptr -= 3;
  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  filtersReg = _mm_srai_epi16(filtersReg, 1);
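  // Halving the coefficients keeps them within the signed 8-bit range that
  // _mm_maddubs_epi16 requires; the lost bit of precision is compensated
  // below by rounding with 32 and shifting right by 6 instead of the usual
  // 64 and FILTER_BITS (7).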
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi32(0x5040302u));
  filt1Reg = _mm_load_si128((__m128i const *)(filtd4));

  for (i = output_height; i > 0; i -= 1) {
    // load a row of the source buffer
    srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);

    // filter the source buffer
    srcRegFilt32b1_1 = _mm_shuffle_epi8(srcReg32b1, filt1Reg);

    // multiply 4 adjacent elements with the filter and add the result
    srcRegFilt32b1_1 = _mm_maddubs_epi16(srcRegFilt32b1_1, firstFilters);

    // horizontally add adjacent pairs to complete the 4-tap sums
    srcRegFilt32b1_1 = _mm_hadds_epi16(srcRegFilt32b1_1, _mm_setzero_si128());

    // add the rounding offset and shift each 16-bit value right by 6
    srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
    srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);

    // shrink each 16-bit value to 8 bits; only the low four bytes are used
    srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, _mm_setzero_si128());

    src_ptr += src_pixels_per_line;

    *((uint32_t *)(output_ptr)) = _mm_cvtsi128_si32(srcRegFilt32b1_1);
    output_ptr += output_pitch;
  }
}

static void aom_filter_block1d4_v4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i addFilterReg32;
  __m128i srcReg2, srcReg3, srcReg23, srcReg4, srcReg34, srcReg5, srcReg45,
      srcReg6, srcReg56;
  __m128i srcReg23_34_lo, srcReg45_56_lo;
  __m128i srcReg2345_3456_lo, srcReg2345_3456_hi;
  __m128i resReglo, resReghi;
  __m128i firstFilters;
  unsigned int i;
  ptrdiff_t src_stride, dst_stride;

  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_srai_epi16(filtersReg, 1);
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi32(0x5040302u));

  // multiply the size of the source and destination strides by two
  src_stride = src_pitch << 1;
  dst_stride = out_pitch << 1;

  srcReg2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 2));
  srcReg3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 3));
  srcReg23 = _mm_unpacklo_epi32(srcReg2, srcReg3);

  srcReg4 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 4));

  // interleave rows 3 and 4 so consecutive rows share one register
  srcReg34 = _mm_unpacklo_epi32(srcReg3, srcReg4);

  srcReg23_34_lo = _mm_unpacklo_epi8(srcReg23, srcReg34);

  for (i = output_height; i > 1; i -= 2) {
    srcReg5 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 5));
    srcReg45 = _mm_unpacklo_epi32(srcReg4, srcReg5);

    srcReg6 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 6));
    srcReg56 = _mm_unpacklo_epi32(srcReg5, srcReg6);

    // merge every two consecutive registers
    srcReg45_56_lo = _mm_unpacklo_epi8(srcReg45, srcReg56);

    srcReg2345_3456_lo = _mm_unpacklo_epi16(srcReg23_34_lo, srcReg45_56_lo);
    srcReg2345_3456_hi = _mm_unpackhi_epi16(srcReg23_34_lo, srcReg45_56_lo);
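    // After the epi32/epi8/epi16 unpack stages, each 32-bit group of
    // srcReg2345_3456_lo holds the four vertical neighbors (rows 2..5) of
    // one column, and the _hi register holds rows 3..6, so the maddubs and
    // hadds below evaluate the 4-tap filter for two output rows at once.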

    // multiply 2 adjacent elements with the filter and add the result
    resReglo = _mm_maddubs_epi16(srcReg2345_3456_lo, firstFilters);
    resReghi = _mm_maddubs_epi16(srcReg2345_3456_hi, firstFilters);

    resReglo = _mm_hadds_epi16(resReglo, _mm_setzero_si128());
    resReghi = _mm_hadds_epi16(resReghi, _mm_setzero_si128());

    // add the rounding offset and shift each 16-bit value right by 6
    resReglo = _mm_adds_epi16(resReglo, addFilterReg32);
    resReghi = _mm_adds_epi16(resReghi, addFilterReg32);
    resReglo = _mm_srai_epi16(resReglo, 6);
    resReghi = _mm_srai_epi16(resReghi, 6);

    // shrink each 16-bit value to 8 bits; resReglo and resReghi hold the
    // first and second output rows
    resReglo = _mm_packus_epi16(resReglo, resReglo);
    resReghi = _mm_packus_epi16(resReghi, resReghi);

    src_ptr += src_stride;

    *((uint32_t *)(output_ptr)) = _mm_cvtsi128_si32(resReglo);
    *((uint32_t *)(output_ptr + out_pitch)) = _mm_cvtsi128_si32(resReghi);

    output_ptr += dst_stride;

    // carry over the interleaved rows for the next iteration
    srcReg23_34_lo = srcReg45_56_lo;
    srcReg4 = srcReg6;
  }
}

void aom_filter_block1d4_h8_intrin_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i firstFilters, secondFilters, shuffle1, shuffle2;
  __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4;
  __m128i addFilterReg64, filtersReg, srcReg, minReg;
  unsigned int i;

  // create a register with the rounding constant 64 in each 16-bit lane
  addFilterReg64 = _mm_set1_epi32((int)0x0400040u);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the first 16 bits of the filter into the first lane
  firstFilters = _mm_shufflelo_epi16(filtersReg, 0);
  // duplicate only the third 16 bits of the filter into the first lane
  secondFilters = _mm_shufflelo_epi16(filtersReg, 0xAAu);
  // duplicate only the second 16 bits of the filter into the second lane
  // firstFilters: k0 k1 k0 k1 k0 k1 k0 k1 k2 k3 k2 k3 k2 k3 k2 k3
  firstFilters = _mm_shufflehi_epi16(firstFilters, 0x55u);
  // duplicate only the fourth 16 bits of the filter into the second lane
  // secondFilters: k4 k5 k4 k5 k4 k5 k4 k5 k6 k7 k6 k7 k6 k7 k6 k7
  secondFilters = _mm_shufflehi_epi16(secondFilters, 0xFFu);

  // load the local shuffle masks
  shuffle1 = _mm_load_si128((__m128i const *)filt1_4_h8);
  shuffle2 = _mm_load_si128((__m128i const *)filt2_4_h8);

  for (i = 0; i < output_height; i++) {
    srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3));

    // filter the source buffer
    srcRegFilt1 = _mm_shuffle_epi8(srcReg, shuffle1);
    srcRegFilt2 = _mm_shuffle_epi8(srcReg, shuffle2);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters);
    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters);

    // extract the high halves, which hold the (k2,k3) and (k6,k7) partial
    // sums
    srcRegFilt3 = _mm_srli_si128(srcRegFilt1, 8);
    srcRegFilt4 = _mm_srli_si128(srcRegFilt2, 8);

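    // The four partial sums per pixel must be combined with saturating
    // 16-bit adds. Taking the min and max of the two middle-tap terms and
    // adding the min before the max keeps any intermediate saturation
    // consistent with the final clamped result.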
    minReg = _mm_min_epi16(srcRegFilt3, srcRegFilt2);

    // add and saturate all the results together
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4);
    srcRegFilt3 = _mm_max_epi16(srcRegFilt3, srcRegFilt2);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt3);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64);

    // shift each 16-bit value right by 7 bits
    srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7);

    // shrink each 16-bit value to 8 bits
    srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);
    src_ptr += src_pixels_per_line;

    // save only 4 bytes
    *((int *)&output_ptr[0]) = _mm_cvtsi128_si32(srcRegFilt1);

    output_ptr += output_pitch;
  }
}

static void aom_filter_block1d8_h4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i addFilterReg32, filt2Reg, filt3Reg;
  __m128i secondFilters, thirdFilters;
  __m128i srcRegFilt32b1_1, srcRegFilt32b2, srcRegFilt32b3;
  __m128i srcReg32b1;
  unsigned int i;
  src_ptr -= 3;
  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  filtersReg = _mm_srai_epi16(filtersReg, 1);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the second 16 bits (third and fourth bytes)
  // across the 128-bit register
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth bytes)
  // across the 128-bit register
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));

  filt2Reg = _mm_load_si128((__m128i const *)(filt_h4 + 32));
  filt3Reg = _mm_load_si128((__m128i const *)(filt_h4 + 32 * 2));
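  // A 4-tap kernel stores its taps in positions 2..5 of the 8-tap filter
  // array, so only the second and third tap pairs are needed here; the
  // matching filt_h4 masks start the byte windows at offsets 2 and 4.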

  for (i = output_height; i > 0; i -= 1) {
    srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);

    // filter the source buffer
    srcRegFilt32b3 = _mm_shuffle_epi8(srcReg32b1, filt2Reg);
    srcRegFilt32b2 = _mm_shuffle_epi8(srcReg32b1, filt3Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b3 = _mm_maddubs_epi16(srcRegFilt32b3, secondFilters);
    srcRegFilt32b2 = _mm_maddubs_epi16(srcRegFilt32b2, thirdFilters);

    srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b3, srcRegFilt32b2);

    // add the rounding offset and shift each 16-bit value right by 6
    srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
    srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);

    // shrink each 16-bit value to 8 bits
    srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, _mm_setzero_si128());

    src_ptr += src_pixels_per_line;

    _mm_storel_epi64((__m128i *)output_ptr, srcRegFilt32b1_1);

    output_ptr += output_pitch;
  }
}

static void aom_filter_block1d8_v4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i srcReg2, srcReg3, srcReg4, srcReg5, srcReg6;
  __m128i srcReg23, srcReg34, srcReg45, srcReg56;
  __m128i resReg23, resReg34, resReg45, resReg56;
  __m128i resReg23_45, resReg34_56;
  __m128i addFilterReg32, secondFilters, thirdFilters;
  unsigned int i;
  ptrdiff_t src_stride, dst_stride;

  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_srai_epi16(filtersReg, 1);
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the second 16 bits (third and fourth bytes)
  // across the 128-bit register
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth bytes)
  // across the 128-bit register
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));

  // multiply the size of the source and destination strides by two
  src_stride = src_pitch << 1;
  dst_stride = out_pitch << 1;

  srcReg2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 2));
  srcReg3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 3));
  srcReg23 = _mm_unpacklo_epi8(srcReg2, srcReg3);

  srcReg4 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 4));

  // interleave rows 3 and 4 so consecutive rows share one register
  srcReg34 = _mm_unpacklo_epi8(srcReg3, srcReg4);
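  // Interleaving rows n and n + 1 bytewise lets one pmaddubsw apply a tap
  // pair down a column: srcReg23 with secondFilters yields
  // k2 * row2 + k3 * row3 per pixel, and adding the thirdFilters result for
  // rows 4 and 5 completes the 4-tap vertical sum.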

  for (i = output_height; i > 1; i -= 2) {
    srcReg5 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 5));

    srcReg45 = _mm_unpacklo_epi8(srcReg4, srcReg5);

    srcReg6 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 6));

    srcReg56 = _mm_unpacklo_epi8(srcReg5, srcReg6);

    // multiply 2 adjacent elements with the filter and add the result
    resReg23 = _mm_maddubs_epi16(srcReg23, secondFilters);
    resReg34 = _mm_maddubs_epi16(srcReg34, secondFilters);
    resReg45 = _mm_maddubs_epi16(srcReg45, thirdFilters);
    resReg56 = _mm_maddubs_epi16(srcReg56, thirdFilters);

    // add and saturate the results together
    resReg23_45 = _mm_adds_epi16(resReg23, resReg45);
    resReg34_56 = _mm_adds_epi16(resReg34, resReg56);

    // add the rounding offset and shift each 16-bit value right by 6
    resReg23_45 = _mm_adds_epi16(resReg23_45, addFilterReg32);
    resReg34_56 = _mm_adds_epi16(resReg34_56, addFilterReg32);
    resReg23_45 = _mm_srai_epi16(resReg23_45, 6);
    resReg34_56 = _mm_srai_epi16(resReg34_56, 6);

    // shrink each 16-bit value to 8 bits; the two registers hold the first
    // and second output rows
    resReg23_45 = _mm_packus_epi16(resReg23_45, _mm_setzero_si128());
    resReg34_56 = _mm_packus_epi16(resReg34_56, _mm_setzero_si128());

    src_ptr += src_stride;

    _mm_storel_epi64((__m128i *)output_ptr, (resReg23_45));
    _mm_storel_epi64((__m128i *)(output_ptr + out_pitch), (resReg34_56));

    output_ptr += dst_stride;

    // carry over the interleaved rows for the next iteration
    srcReg23 = srcReg45;
    srcReg34 = srcReg56;
    srcReg4 = srcReg6;
  }
}

void aom_filter_block1d8_h8_intrin_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg;
  __m128i filt1Reg, filt2Reg, filt3Reg, filt4Reg;
  __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4;
  __m128i addFilterReg64, filtersReg, minReg;
  unsigned int i;

  // create a register with the rounding constant 64 in each 16-bit lane
  addFilterReg64 = _mm_set1_epi32((int)0x0400040u);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the first 16 bits (first and second bytes)
  // across the 128-bit register
  firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u));
  // duplicate only the second 16 bits (third and fourth bytes)
  // across the 128-bit register
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth bytes)
  // across the 128-bit register
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));
  // duplicate only the fourth 16 bits (seventh and eighth bytes)
  // across the 128-bit register
  forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u));

  filt1Reg = _mm_load_si128((__m128i const *)filt1_global);
  filt2Reg = _mm_load_si128((__m128i const *)filt2_global);
  filt3Reg = _mm_load_si128((__m128i const *)filt3_global);
  filt4Reg = _mm_load_si128((__m128i const *)filt4_global);

  for (i = 0; i < output_height; i++) {
    srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3));

    // filter the source buffer
    srcRegFilt1 = _mm_shuffle_epi8(srcReg, filt1Reg);
    srcRegFilt2 = _mm_shuffle_epi8(srcReg, filt2Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters);
    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters);

    // filter the source buffer
    srcRegFilt3 = _mm_shuffle_epi8(srcReg, filt3Reg);
    srcRegFilt4 = _mm_shuffle_epi8(srcReg, filt4Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, thirdFilters);
    srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4, forthFilters);

    // add and saturate all the results together; the min/max ordering keeps
    // intermediate saturation consistent with the final clamped result
    // (see aom_filter_block1d4_h8_intrin_ssse3 above)
    minReg = _mm_min_epi16(srcRegFilt2, srcRegFilt3);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4);

    srcRegFilt2 = _mm_max_epi16(srcRegFilt2, srcRegFilt3);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64);

    // shift each 16-bit value right by 7 bits
    srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7);

    // shrink each 16-bit value to 8 bits
    srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);

    src_ptr += src_pixels_per_line;

    // save only 8 bytes
    _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1);

    output_ptr += output_pitch;
  }
}

void aom_filter_block1d8_v8_intrin_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i addFilterReg64, filtersReg, minReg;
  __m128i firstFilters, secondFilters, thirdFilters, forthFilters;
  __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt5;
  __m128i srcReg1, srcReg2, srcReg3, srcReg4, srcReg5, srcReg6, srcReg7;
  __m128i srcReg8;
  unsigned int i;

  // create a register with the rounding constant 64 in each 16-bit lane
  addFilterReg64 = _mm_set1_epi32((int)0x0400040u);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the first 16 bits of the filter
  firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u));
  // duplicate only the second 16 bits of the filter
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits of the filter
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));
  // duplicate only the fourth 16 bits of the filter
  forthFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x706u));

  // load the first 7 rows of 8 bytes
  srcReg1 = _mm_loadl_epi64((const __m128i *)src_ptr);
  srcReg2 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch));
  srcReg3 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 2));
  srcReg4 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 3));
  srcReg5 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 4));
  srcReg6 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 5));
  srcReg7 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 6));
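  // Rows 1..7 of the 8-tap window are kept in registers across iterations;
  // each pass below loads only the one new row (row 8), filters, and then
  // slides the whole window down by a row.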

  for (i = 0; i < output_height; i++) {
    // load the eighth row of the window
    srcReg8 = _mm_loadl_epi64((const __m128i *)(src_ptr + src_pitch * 7));

    // merge adjacent rows together
    srcRegFilt1 = _mm_unpacklo_epi8(srcReg1, srcReg2);
    srcRegFilt3 = _mm_unpacklo_epi8(srcReg3, srcReg4);

    // merge adjacent rows together
    srcRegFilt2 = _mm_unpacklo_epi8(srcReg5, srcReg6);
    srcRegFilt5 = _mm_unpacklo_epi8(srcReg7, srcReg8);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters);
    srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, secondFilters);
    srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, thirdFilters);
    srcRegFilt5 = _mm_maddubs_epi16(srcRegFilt5, forthFilters);

    // add and saturate the results together
    minReg = _mm_min_epi16(srcRegFilt2, srcRegFilt3);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt5);
    srcRegFilt2 = _mm_max_epi16(srcRegFilt2, srcRegFilt3);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2);
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64);

    // shift each 16-bit value right by 7 bits
    srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7);

    // shrink each 16-bit value to 8 bits
    srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1);

    src_ptr += src_pitch;

    // slide the window down a row
    srcReg1 = srcReg2;
    srcReg2 = srcReg3;
    srcReg3 = srcReg4;
    srcReg4 = srcReg5;
    srcReg5 = srcReg6;
    srcReg6 = srcReg7;
    srcReg7 = srcReg8;

    // save only the 8 bytes of the convolve result
    _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1);

    output_ptr += out_pitch;
  }
}

static void aom_filter_block1d16_h4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i addFilterReg32, filt2Reg, filt3Reg;
  __m128i secondFilters, thirdFilters;
  __m128i srcRegFilt32b1_1, srcRegFilt32b2_1, srcRegFilt32b2, srcRegFilt32b3;
  __m128i srcReg32b1, srcReg32b2;
  unsigned int i;
  src_ptr -= 3;
  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  filtersReg = _mm_srai_epi16(filtersReg, 1);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the second 16 bits (third and fourth bytes)
  // across the 128-bit register
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth bytes)
  // across the 128-bit register
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));

  filt2Reg = _mm_load_si128((__m128i const *)(filt_h4 + 32));
  filt3Reg = _mm_load_si128((__m128i const *)(filt_h4 + 32 * 2));
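  // This is the 16-wide version of the 4-tap horizontal kernel above: the
  // row is processed as two 8-pixel halves via two overlapping unaligned
  // loads (offsets 0 and 8), and the two results are packed together below.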

  for (i = output_height; i > 0; i -= 1) {
    srcReg32b1 = _mm_loadu_si128((const __m128i *)src_ptr);

    // filter the source buffer
    srcRegFilt32b3 = _mm_shuffle_epi8(srcReg32b1, filt2Reg);
    srcRegFilt32b2 = _mm_shuffle_epi8(srcReg32b1, filt3Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b3 = _mm_maddubs_epi16(srcRegFilt32b3, secondFilters);
    srcRegFilt32b2 = _mm_maddubs_epi16(srcRegFilt32b2, thirdFilters);

    srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b3, srcRegFilt32b2);

    // load the next 16 bytes
    // (overlapping the previous load by 8 bytes)
    srcReg32b2 = _mm_loadu_si128((const __m128i *)(src_ptr + 8));

    // filter the source buffer
    srcRegFilt32b3 = _mm_shuffle_epi8(srcReg32b2, filt2Reg);
    srcRegFilt32b2 = _mm_shuffle_epi8(srcReg32b2, filt3Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b3 = _mm_maddubs_epi16(srcRegFilt32b3, secondFilters);
    srcRegFilt32b2 = _mm_maddubs_epi16(srcRegFilt32b2, thirdFilters);

    // add and saturate the results together
    srcRegFilt32b2_1 = _mm_adds_epi16(srcRegFilt32b3, srcRegFilt32b2);

    // add the rounding offset and shift each 16-bit value right by 6
    srcRegFilt32b1_1 = _mm_adds_epi16(srcRegFilt32b1_1, addFilterReg32);
    srcRegFilt32b2_1 = _mm_adds_epi16(srcRegFilt32b2_1, addFilterReg32);
    srcRegFilt32b1_1 = _mm_srai_epi16(srcRegFilt32b1_1, 6);
    srcRegFilt32b2_1 = _mm_srai_epi16(srcRegFilt32b2_1, 6);

    // shrink each 16-bit value to 8 bits; the low lane holds the first
    // eight pixels and the high lane the second eight
    srcRegFilt32b1_1 = _mm_packus_epi16(srcRegFilt32b1_1, srcRegFilt32b2_1);

    src_ptr += src_pixels_per_line;

    _mm_store_si128((__m128i *)output_ptr, srcRegFilt32b1_1);

    output_ptr += output_pitch;
  }
}

static void aom_filter_block1d16_v4_ssse3(
    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) {
  __m128i filtersReg;
  __m128i srcReg2, srcReg3, srcReg4, srcReg5, srcReg6;
  __m128i srcReg23_lo, srcReg23_hi, srcReg34_lo, srcReg34_hi;
  __m128i srcReg45_lo, srcReg45_hi, srcReg56_lo, srcReg56_hi;
  __m128i resReg23_lo, resReg34_lo, resReg45_lo, resReg56_lo;
  __m128i resReg23_hi, resReg34_hi, resReg45_hi, resReg56_hi;
  __m128i resReg23_45_lo, resReg34_56_lo, resReg23_45_hi, resReg34_56_hi;
  __m128i resReg23_45, resReg34_56;
  __m128i addFilterReg32, secondFilters, thirdFilters;
  unsigned int i;
  ptrdiff_t src_stride, dst_stride;

  addFilterReg32 = _mm_set1_epi16(32);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16-bit (short) coefficients to 8-bit (byte) and replicate
  // the same data in both lanes of the 128-bit register.
  filtersReg = _mm_srai_epi16(filtersReg, 1);
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);

  // duplicate only the second 16 bits (third and fourth bytes)
  // across the 128-bit register
  secondFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth bytes)
  // across the 128-bit register
  thirdFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x504u));

  // multiply the size of the source and destination strides by two
  src_stride = src_pitch << 1;
  dst_stride = out_pitch << 1;

  srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2));
  srcReg3 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3));
  srcReg23_lo = _mm_unpacklo_epi8(srcReg2, srcReg3);
  srcReg23_hi = _mm_unpackhi_epi8(srcReg2, srcReg3);

  srcReg4 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4));

  // interleave rows 3 and 4 so consecutive rows share one register; the 16
  // columns are handled as two 8-column halves (_lo and _hi)
  srcReg34_lo = _mm_unpacklo_epi8(srcReg3, srcReg4);
  srcReg34_hi = _mm_unpackhi_epi8(srcReg3, srcReg4);

  for (i = output_height; i > 1; i -= 2) {
    srcReg5 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5));

    srcReg45_lo = _mm_unpacklo_epi8(srcReg4, srcReg5);
    srcReg45_hi = _mm_unpackhi_epi8(srcReg4, srcReg5);

    srcReg6 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6));

    srcReg56_lo = _mm_unpacklo_epi8(srcReg5, srcReg6);
    srcReg56_hi = _mm_unpackhi_epi8(srcReg5, srcReg6);

    // multiply 2 adjacent elements with the filter and add the result
    resReg23_lo = _mm_maddubs_epi16(srcReg23_lo, secondFilters);
    resReg34_lo = _mm_maddubs_epi16(srcReg34_lo, secondFilters);
    resReg45_lo = _mm_maddubs_epi16(srcReg45_lo, thirdFilters);
    resReg56_lo = _mm_maddubs_epi16(srcReg56_lo, thirdFilters);

    // add and saturate the results together
    resReg23_45_lo = _mm_adds_epi16(resReg23_lo, resReg45_lo);
    resReg34_56_lo = _mm_adds_epi16(resReg34_lo, resReg56_lo);

    // multiply 2 adjacent elements with the filter and add the result
    resReg23_hi = _mm_maddubs_epi16(srcReg23_hi, secondFilters);
    resReg34_hi = _mm_maddubs_epi16(srcReg34_hi, secondFilters);
    resReg45_hi = _mm_maddubs_epi16(srcReg45_hi, thirdFilters);
    resReg56_hi = _mm_maddubs_epi16(srcReg56_hi, thirdFilters);

    // add and saturate the results together
    resReg23_45_hi = _mm_adds_epi16(resReg23_hi, resReg45_hi);
    resReg34_56_hi = _mm_adds_epi16(resReg34_hi, resReg56_hi);

    // add the rounding offset and shift each 16-bit value right by 6
    resReg23_45_lo = _mm_adds_epi16(resReg23_45_lo, addFilterReg32);
    resReg34_56_lo = _mm_adds_epi16(resReg34_56_lo, addFilterReg32);
    resReg23_45_hi = _mm_adds_epi16(resReg23_45_hi, addFilterReg32);
    resReg34_56_hi = _mm_adds_epi16(resReg34_56_hi, addFilterReg32);
    resReg23_45_lo = _mm_srai_epi16(resReg23_45_lo, 6);
    resReg34_56_lo = _mm_srai_epi16(resReg34_56_lo, 6);
    resReg23_45_hi = _mm_srai_epi16(resReg23_45_hi, 6);
    resReg34_56_hi = _mm_srai_epi16(resReg34_56_hi, 6);

    // shrink each 16-bit value to 8 bits; the low lane holds the first
    // eight columns and the high lane the remaining eight
    resReg23_45 = _mm_packus_epi16(resReg23_45_lo, resReg23_45_hi);
    resReg34_56 = _mm_packus_epi16(resReg34_56_lo, resReg34_56_hi);

    src_ptr += src_stride;

    _mm_store_si128((__m128i *)output_ptr, (resReg23_45));
    _mm_store_si128((__m128i *)(output_ptr + out_pitch), (resReg34_56));

    output_ptr += dst_stride;

    // carry over the interleaved rows for the next iteration
    srcReg23_lo = srcReg45_lo;
    srcReg34_lo = srcReg56_lo;
    srcReg23_hi = srcReg45_hi;
    srcReg34_hi = srcReg56_hi;
    srcReg4 = srcReg6;
  }
}

static INLINE __m128i shuffle_filter_convolve8_8_ssse3(
    const __m128i *const s, const int16_t *const filter) {
  __m128i f[4];
  shuffle_filter_ssse3(filter, f);
  return convolve8_8_ssse3(s, f);
}

static void filter_horiz_w8_ssse3(const uint8_t *const src,
                                  const ptrdiff_t src_stride,
                                  uint8_t *const dst,
                                  const int16_t *const x_filter) {
  __m128i s[8], ss[4], temp;

  load_8bit_8x8(src, src_stride, s);
  // 00 01 10 11 20 21 30 31  40 41 50 51 60 61 70 71
  // 02 03 12 13 22 23 32 33  42 43 52 53 62 63 72 73
  // 04 05 14 15 24 25 34 35  44 45 54 55 64 65 74 75
  // 06 07 16 17 26 27 36 37  46 47 56 57 66 67 76 77
  transpose_16bit_4x8(s, ss);
  temp = shuffle_filter_convolve8_8_ssse3(ss, x_filter);
  // shrink each 16-bit value to 8 bits
  temp = _mm_packus_epi16(temp, temp);
  // save only the 8 bytes of the convolve result
  _mm_storel_epi64((__m128i *)dst, temp);
}

static void transpose8x8_to_dst(const uint8_t *const src,
                                const ptrdiff_t src_stride, uint8_t *const dst,
                                const ptrdiff_t dst_stride) {
  __m128i s[8];

  load_8bit_8x8(src, src_stride, s);
  transpose_8bit_8x8(s, s);
  store_8bit_8x8(s, dst, dst_stride);
}

static void scaledconvolve_horiz_w8(const uint8_t *src,
                                    const ptrdiff_t src_stride, uint8_t *dst,
                                    const ptrdiff_t dst_stride,
                                    const InterpKernel *const x_filters,
                                    const int x0_q4, const int x_step_q4,
                                    const int w, const int h) {
  DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]);
  int x, y, z;
  src -= SUBPEL_TAPS / 2 - 1;

  // This function processes 8x8 areas. The intermediate height is not always
  // a multiple of 8, so force it to be a multiple of 8 here.
  y = h + (8 - (h & 0x7));

  do {
    int x_q4 = x0_q4;
    for (x = 0; x < w; x += 8) {
      // process 8 src_x steps
      for (z = 0; z < 8; ++z) {
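        // x_q4 is a fixed-point position: the high bits select the integer
        // source column and the low SUBPEL_BITS bits select one of the 16
        // subpel kernels. A phase of 0 is an exact pixel position, so the
        // pixels are copied from the filter's center tap (offset 3,
        // i.e. SUBPEL_TAPS / 2 - 1) instead of being filtered.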
        const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
        const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
        if (x_q4 & SUBPEL_MASK) {
          filter_horiz_w8_ssse3(src_x, src_stride, temp + (z * 8), x_filter);
        } else {
          int i;
          for (i = 0; i < 8; ++i) {
            temp[z * 8 + i] = src_x[i * src_stride + 3];
          }
        }
        x_q4 += x_step_q4;
      }

      // transpose the 8x8 filtered values back to dst
      transpose8x8_to_dst(temp, 8, dst + x, dst_stride);
    }

    src += src_stride * 8;
    dst += dst_stride * 8;
  } while (y -= 8);
}

static void filter_horiz_w4_ssse3(const uint8_t *const src,
                                  const ptrdiff_t src_stride,
                                  uint8_t *const dst,
                                  const int16_t *const filter) {
  __m128i s[4];
  __m128i temp;

  load_8bit_8x4(src, src_stride, s);
  transpose_16bit_4x4(s, s);

  temp = shuffle_filter_convolve8_8_ssse3(s, filter);
  // shrink each 16-bit value to 8 bits
  temp = _mm_packus_epi16(temp, temp);
  // save only 4 bytes
  *(int *)dst = _mm_cvtsi128_si32(temp);
}

static void transpose4x4_to_dst(const uint8_t *const src,
                                const ptrdiff_t src_stride, uint8_t *const dst,
                                const ptrdiff_t dst_stride) {
  __m128i s[4];

  load_8bit_4x4(src, src_stride, s);
  s[0] = transpose_8bit_4x4(s);
  s[1] = _mm_srli_si128(s[0], 4);
  s[2] = _mm_srli_si128(s[0], 8);
  s[3] = _mm_srli_si128(s[0], 12);
  store_8bit_4x4(s, dst, dst_stride);
}

static void scaledconvolve_horiz_w4(const uint8_t *src,
                                    const ptrdiff_t src_stride, uint8_t *dst,
                                    const ptrdiff_t dst_stride,
                                    const InterpKernel *const x_filters,
                                    const int x0_q4, const int x_step_q4,
                                    const int w, const int h) {
  DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]);
  int x, y, z;
  src -= SUBPEL_TAPS / 2 - 1;

  for (y = 0; y < h; y += 4) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; x += 4) {
      // process 4 src_x steps
      for (z = 0; z < 4; ++z) {
        const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
        const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
        if (x_q4 & SUBPEL_MASK) {
          filter_horiz_w4_ssse3(src_x, src_stride, temp + (z * 4), x_filter);
        } else {
          int i;
          for (i = 0; i < 4; ++i) {
            temp[z * 4 + i] = src_x[i * src_stride + 3];
          }
        }
        x_q4 += x_step_q4;
      }

      // transpose the 4x4 filtered values back to dst
      transpose4x4_to_dst(temp, 4, dst + x, dst_stride);
    }

    src += src_stride * 4;
    dst += dst_stride * 4;
  }
}

static __m128i filter_vert_kernel(const __m128i *const s,
                                  const int16_t *const filter) {
  __m128i ss[4];
  __m128i temp;

  // 00 10 01 11 02 12 03 13
  ss[0] = _mm_unpacklo_epi8(s[0], s[1]);
  // 20 30 21 31 22 32 23 33
  ss[1] = _mm_unpacklo_epi8(s[2], s[3]);
  // 40 50 41 51 42 52 43 53
  ss[2] = _mm_unpacklo_epi8(s[4], s[5]);
  // 60 70 61 71 62 72 63 73
  ss[3] = _mm_unpacklo_epi8(s[6], s[7]);

  temp = shuffle_filter_convolve8_8_ssse3(ss, filter);
  // shrink each 16-bit value to 8 bits
  return _mm_packus_epi16(temp, temp);
}

static void filter_vert_w4_ssse3(const uint8_t *const src,
                                 const ptrdiff_t src_stride, uint8_t *const dst,
                                 const int16_t *const filter) {
  __m128i s[8];
  __m128i temp;

  load_8bit_4x8(src, src_stride, s);
  temp = filter_vert_kernel(s, filter);
  // save only 4 bytes
  *(int *)dst = _mm_cvtsi128_si32(temp);
}

static void scaledconvolve_vert_w4(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *const dst,
    const ptrdiff_t dst_stride, const InterpKernel *const y_filters,
    const int y0_q4, const int y_step_q4, const int w, const int h) {
  int y;
  int y_q4 = y0_q4;

  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (y = 0; y < h; ++y) {
    const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
    const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];

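    // Same fixed-point stepping as the horizontal pass, but along rows: a
    // zero phase means the output row is an exact source row, so it is
    // copied from the window's center row (offset 3) rather than filtered.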
    if (y_q4 & SUBPEL_MASK) {
      filter_vert_w4_ssse3(src_y, src_stride, &dst[y * dst_stride], y_filter);
    } else {
      memcpy(&dst[y * dst_stride], &src_y[3 * src_stride], w);
    }

    y_q4 += y_step_q4;
  }
}

static void filter_vert_w8_ssse3(const uint8_t *const src,
                                 const ptrdiff_t src_stride, uint8_t *const dst,
                                 const int16_t *const filter) {
  __m128i s[8], temp;

  load_8bit_8x8(src, src_stride, s);
  temp = filter_vert_kernel(s, filter);
  // save only the 8 bytes of the convolve result
  _mm_storel_epi64((__m128i *)dst, temp);
}

static void scaledconvolve_vert_w8(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *const dst,
    const ptrdiff_t dst_stride, const InterpKernel *const y_filters,
    const int y0_q4, const int y_step_q4, const int w, const int h) {
  int y;
  int y_q4 = y0_q4;

  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (y = 0; y < h; ++y) {
    const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
    const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
    if (y_q4 & SUBPEL_MASK) {
      filter_vert_w8_ssse3(src_y, src_stride, &dst[y * dst_stride], y_filter);
    } else {
      memcpy(&dst[y * dst_stride], &src_y[3 * src_stride], w);
    }
    y_q4 += y_step_q4;
  }
}

static void filter_vert_w16_ssse3(const uint8_t *src,
                                  const ptrdiff_t src_stride,
                                  uint8_t *const dst,
                                  const int16_t *const filter, const int w) {
  int i;
  __m128i f[4];
  shuffle_filter_ssse3(filter, f);

  for (i = 0; i < w; i += 16) {
    __m128i s[8], s_lo[4], s_hi[4], temp_lo, temp_hi;

    loadu_8bit_16x8(src, src_stride, s);

    // merge adjacent rows together
    s_lo[0] = _mm_unpacklo_epi8(s[0], s[1]);
    s_hi[0] = _mm_unpackhi_epi8(s[0], s[1]);
    s_lo[1] = _mm_unpacklo_epi8(s[2], s[3]);
    s_hi[1] = _mm_unpackhi_epi8(s[2], s[3]);
    s_lo[2] = _mm_unpacklo_epi8(s[4], s[5]);
    s_hi[2] = _mm_unpackhi_epi8(s[4], s[5]);
    s_lo[3] = _mm_unpacklo_epi8(s[6], s[7]);
    s_hi[3] = _mm_unpackhi_epi8(s[6], s[7]);
    temp_lo = convolve8_8_ssse3(s_lo, f);
    temp_hi = convolve8_8_ssse3(s_hi, f);

    // shrink each 16-bit value to 8 bits; the low lane holds the first
    // eight columns and the high lane the remaining eight
    temp_hi = _mm_packus_epi16(temp_lo, temp_hi);
    src += 16;
    // save the 16 bytes of the convolve result
    _mm_store_si128((__m128i *)&dst[i], temp_hi);
  }
}

static void scaledconvolve_vert_w16(
    const uint8_t *src, const ptrdiff_t src_stride, uint8_t *const dst,
    const ptrdiff_t dst_stride, const InterpKernel *const y_filters,
    const int y0_q4, const int y_step_q4, const int w, const int h) {
  int y;
  int y_q4 = y0_q4;

  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (y = 0; y < h; ++y) {
    const unsigned char *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
    const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
    if (y_q4 & SUBPEL_MASK) {
      filter_vert_w16_ssse3(src_y, src_stride, &dst[y * dst_stride], y_filter,
                            w);
    } else {
      memcpy(&dst[y * dst_stride], &src_y[3 * src_stride], w);
    }
    y_q4 += y_step_q4;
  }
}

void aom_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                         ptrdiff_t dst_stride, const InterpKernel *filter,
                         int x0_q4, int x_step_q4, int y0_q4, int y_step_q4,
                         int w, int h) {
  // Note: Fixed size intermediate buffer, temp, places limits on parameters.
  // 2d filtering proceeds in 2 steps:
  //   (1) Interpolate horizontally into an intermediate buffer, temp.
  //   (2) Interpolate temp vertically to derive the sub-pixel result.
  // Deriving the maximum number of rows in the temp buffer (135):
  // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative).
  // --Largest block size is 64x64 pixels.
  // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
  //   original frame (in 1/16th pixel units).
  // --Must round-up because block may be located at sub-pixel position.
  // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
  // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
  // --Require an additional 8 rows for the horiz_w8 transpose tail.
  // When called from the frame scaling function, the smallest scaling factor
  // is x1/4 ==> y_step_q4 = 64. Since w and h are at most 16, the temp
  // buffer is still big enough.
  DECLARE_ALIGNED(16, uint8_t, temp[(135 + 8) * 64]);
  const int intermediate_height =
      (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS;

  assert(w <= 64);
  assert(h <= 64);
  assert(y_step_q4 <= 32 || (y_step_q4 <= 64 && h <= 32));
  assert(x_step_q4 <= 64);

  if (w >= 8) {
    scaledconvolve_horiz_w8(src - src_stride * (SUBPEL_TAPS / 2 - 1),
                            src_stride, temp, 64, filter, x0_q4, x_step_q4, w,
                            intermediate_height);
  } else {
    scaledconvolve_horiz_w4(src - src_stride * (SUBPEL_TAPS / 2 - 1),
                            src_stride, temp, 64, filter, x0_q4, x_step_q4, w,
                            intermediate_height);
  }

  if (w >= 16) {
    scaledconvolve_vert_w16(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst,
                            dst_stride, filter, y0_q4, y_step_q4, w, h);
  } else if (w == 8) {
    scaledconvolve_vert_w8(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst,
                           dst_stride, filter, y0_q4, y_step_q4, w, h);
  } else {
    scaledconvolve_vert_w4(temp + 64 * (SUBPEL_TAPS / 2 - 1), 64, dst,
                           dst_stride, filter, y0_q4, y_step_q4, w, h);
  }
}

filter8_1dfunction aom_filter_block1d16_v8_ssse3;
filter8_1dfunction aom_filter_block1d16_h8_ssse3;
filter8_1dfunction aom_filter_block1d8_v8_ssse3;
filter8_1dfunction aom_filter_block1d8_h8_ssse3;
filter8_1dfunction aom_filter_block1d4_v8_ssse3;
filter8_1dfunction aom_filter_block1d4_h8_ssse3;

filter8_1dfunction aom_filter_block1d16_v2_ssse3;
filter8_1dfunction aom_filter_block1d16_h2_ssse3;
filter8_1dfunction aom_filter_block1d8_v2_ssse3;
filter8_1dfunction aom_filter_block1d8_h2_ssse3;
filter8_1dfunction aom_filter_block1d4_v2_ssse3;
filter8_1dfunction aom_filter_block1d4_h2_ssse3;

// void aom_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride,
//                                uint8_t *dst, ptrdiff_t dst_stride,
//                                const int16_t *filter_x, int x_step_q4,
//                                const int16_t *filter_y, int y_step_q4,
//                                int w, int h);
// void aom_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride,
//                               uint8_t *dst, ptrdiff_t dst_stride,
//                               const int16_t *filter_x, int x_step_q4,
//                               const int16_t *filter_y, int y_step_q4,
//                               int w, int h);
FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , ssse3);
FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , ssse3);