1 /*
2 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #include <arm_neon.h>
13 #include <assert.h>
14
15 #include "config/aom_config.h"
16 #include "config/av1_rtcd.h"
17
18 #include "aom_dsp/txfm_common.h"
19 #include "aom_ports/mem.h"
20 #include "av1/common/common.h"
21 #include "av1/common/arm/convolve_neon.h"
22 #include "av1/common/arm/mem_neon.h"
23 #include "av1/common/arm/transpose_neon.h"
24
25 #if !defined(__aarch64__)
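// Armv7-only single-row variants of the compound averaging helpers below.
// Per pixel, both paths compute (in scalar terms):
//   dist-weighted: avg = (res * fwd_offset + d * bck_offset) >> DIST_PRECISION_BITS
//   otherwise:     avg = (res + d) >> 1
// followed by subtracting the compound offset, a rounding shift right by
// round_bits and saturation to 8 bits.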
26 static INLINE void compute_avg_4x1(
27 uint16x4_t res0, uint16x4_t d0, const uint16_t fwd_offset,
28 const uint16_t bck_offset, const int16x4_t sub_const_vec,
29 const int16_t round_bits, const int use_dist_wtd_comp_avg, uint8x8_t *t0) {
30 int16x4_t tmp0;
31 uint16x4_t tmp_u0;
32 uint32x4_t sum0;
33 int32x4_t dst0;
34 int16x8_t tmp4;
35
36 if (use_dist_wtd_comp_avg) {
37 const int32x4_t round_bits_vec = vdupq_n_s32((int32_t)(-round_bits));
38
39 sum0 = vmull_n_u16(res0, fwd_offset);
40 sum0 = vmlal_n_u16(sum0, d0, bck_offset);
41
42 sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
43
44 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), vmovl_s16(sub_const_vec));
45
46 dst0 = vqrshlq_s32(dst0, round_bits_vec);
47
48 tmp0 = vqmovn_s32(dst0);
49 tmp4 = vcombine_s16(tmp0, tmp0);
50
51 *t0 = vqmovun_s16(tmp4);
52 } else {
53 const int16x4_t round_bits_vec = vdup_n_s16(-round_bits);
54 tmp_u0 = vhadd_u16(res0, d0);
55
56 tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);
57
58 tmp0 = vqrshl_s16(tmp0, round_bits_vec);
59
60 tmp4 = vcombine_s16(tmp0, tmp0);
61
62 *t0 = vqmovun_s16(tmp4);
63 }
64 }
65
66 static INLINE void compute_avg_8x1(
67 uint16x8_t res0, uint16x8_t d0, const uint16_t fwd_offset,
68 const uint16_t bck_offset, const int16x4_t sub_const,
69 const int16_t round_bits, const int use_dist_wtd_comp_avg, uint8x8_t *t0) {
70 int16x4_t tmp0, tmp2;
71 int16x8_t f0;
72 uint32x4_t sum0, sum2;
73 int32x4_t dst0, dst2;
74
75 uint16x8_t tmp_u0;
76
77 if (use_dist_wtd_comp_avg) {
78 const int32x4_t sub_const_vec = vmovl_s16(sub_const);
79 const int32x4_t round_bits_vec = vdupq_n_s32(-(int32_t)round_bits);
80
81 sum0 = vmull_n_u16(vget_low_u16(res0), fwd_offset);
82 sum0 = vmlal_n_u16(sum0, vget_low_u16(d0), bck_offset);
83 sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
84
85 sum2 = vmull_n_u16(vget_high_u16(res0), fwd_offset);
86 sum2 = vmlal_n_u16(sum2, vget_high_u16(d0), bck_offset);
87 sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
88
89 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);
90 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);
91
92 dst0 = vqrshlq_s32(dst0, round_bits_vec);
93 dst2 = vqrshlq_s32(dst2, round_bits_vec);
94
95 tmp0 = vqmovn_s32(dst0);
96 tmp2 = vqmovn_s32(dst2);
97
98 f0 = vcombine_s16(tmp0, tmp2);
99
100 *t0 = vqmovun_s16(f0);
101
102 } else {
103 const int16x8_t sub_const_vec = vcombine_s16(sub_const, sub_const);
104 const int16x8_t round_bits_vec = vdupq_n_s16(-round_bits);
105
106 tmp_u0 = vhaddq_u16(res0, d0);
107
108 f0 = vsubq_s16(vreinterpretq_s16_u16(tmp_u0), sub_const_vec);
109
110 f0 = vqrshlq_s16(f0, round_bits_vec);
111
112 *t0 = vqmovun_s16(f0);
113 }
114 }
115 #endif  // !defined(__aarch64__)
116
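// Compound average of four 4-pixel rows: dist-weighted blend or halving add,
// then offset removal, rounding and narrowing to 8 bits, with two rows packed
// per 8-lane output vector.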
117 static INLINE void compute_avg_4x4(
118 uint16x4_t res0, uint16x4_t res1, uint16x4_t res2, uint16x4_t res3,
119 uint16x4_t d0, uint16x4_t d1, uint16x4_t d2, uint16x4_t d3,
120 const uint16_t fwd_offset, const uint16_t bck_offset,
121 const int16x4_t sub_const_vec, const int16_t round_bits,
122 const int use_dist_wtd_comp_avg, uint8x8_t *t0, uint8x8_t *t1) {
123 int16x4_t tmp0, tmp1, tmp2, tmp3;
124 uint16x4_t tmp_u0, tmp_u1, tmp_u2, tmp_u3;
125 uint32x4_t sum0, sum1, sum2, sum3;
126
127 int32x4_t dst0, dst1, dst2, dst3;
128 int16x8_t tmp4, tmp5;
129 const int16x8_t zero = vdupq_n_s16(0);
130
131 if (use_dist_wtd_comp_avg) {
132 const int32x4_t round_bits_vec = vdupq_n_s32((int32_t)(-round_bits));
133 const int32x4_t const_vec = vmovl_s16(sub_const_vec);
134
135 sum0 = vmull_n_u16(res0, fwd_offset);
136 sum0 = vmlal_n_u16(sum0, d0, bck_offset);
137 sum1 = vmull_n_u16(res1, fwd_offset);
138 sum1 = vmlal_n_u16(sum1, d1, bck_offset);
139 sum2 = vmull_n_u16(res2, fwd_offset);
140 sum2 = vmlal_n_u16(sum2, d2, bck_offset);
141 sum3 = vmull_n_u16(res3, fwd_offset);
142 sum3 = vmlal_n_u16(sum3, d3, bck_offset);
143
144 sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
145 sum1 = vshrq_n_u32(sum1, DIST_PRECISION_BITS);
146 sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
147 sum3 = vshrq_n_u32(sum3, DIST_PRECISION_BITS);
148
149 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), const_vec);
150 dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), const_vec);
151 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), const_vec);
152 dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), const_vec);
153
154 dst0 = vqrshlq_s32(dst0, round_bits_vec);
155 dst1 = vqrshlq_s32(dst1, round_bits_vec);
156 dst2 = vqrshlq_s32(dst2, round_bits_vec);
157 dst3 = vqrshlq_s32(dst3, round_bits_vec);
158
159 tmp0 = vqmovn_s32(dst0);
160 tmp1 = vqmovn_s32(dst1);
161 tmp2 = vqmovn_s32(dst2);
162 tmp3 = vqmovn_s32(dst3);
163 tmp4 = vcombine_s16(tmp0, tmp1);
164 tmp5 = vcombine_s16(tmp2, tmp3);
165 tmp4 = vmaxq_s16(tmp4, zero);
166 tmp5 = vmaxq_s16(tmp5, zero);
167
168 *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4));
169 *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5));
170 } else {
171 const int16x4_t round_bits_vec = vdup_n_s16(-round_bits);
172 tmp_u0 = vhadd_u16(res0, d0);
173 tmp_u1 = vhadd_u16(res1, d1);
174 tmp_u2 = vhadd_u16(res2, d2);
175 tmp_u3 = vhadd_u16(res3, d3);
176
177 tmp0 = vsub_s16(vreinterpret_s16_u16(tmp_u0), sub_const_vec);
178 tmp1 = vsub_s16(vreinterpret_s16_u16(tmp_u1), sub_const_vec);
179 tmp2 = vsub_s16(vreinterpret_s16_u16(tmp_u2), sub_const_vec);
180 tmp3 = vsub_s16(vreinterpret_s16_u16(tmp_u3), sub_const_vec);
181
182 tmp0 = vqrshl_s16(tmp0, round_bits_vec);
183 tmp1 = vqrshl_s16(tmp1, round_bits_vec);
184 tmp2 = vqrshl_s16(tmp2, round_bits_vec);
185 tmp3 = vqrshl_s16(tmp3, round_bits_vec);
186
187 tmp4 = vcombine_s16(tmp0, tmp1);
188 tmp5 = vcombine_s16(tmp2, tmp3);
189 tmp4 = vmaxq_s16(tmp4, zero);
190 tmp5 = vmaxq_s16(tmp5, zero);
191
192 *t0 = vqmovn_u16(vreinterpretq_u16_s16(tmp4));
193 *t1 = vqmovn_u16(vreinterpretq_u16_s16(tmp5));
194 }
195 }
196
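// As compute_avg_4x4, but for four 8-pixel rows, producing one 8-bit output
// vector per row.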
197 static INLINE void compute_avg_8x4(
198 uint16x8_t res0, uint16x8_t res1, uint16x8_t res2, uint16x8_t res3,
199 uint16x8_t d0, uint16x8_t d1, uint16x8_t d2, uint16x8_t d3,
200 const uint16_t fwd_offset, const uint16_t bck_offset,
201 const int16x4_t sub_const, const int16_t round_bits,
202 const int use_dist_wtd_comp_avg, uint8x8_t *t0, uint8x8_t *t1,
203 uint8x8_t *t2, uint8x8_t *t3) {
204 int16x4_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
205 int16x8_t f0, f1, f2, f3;
206 uint32x4_t sum0, sum1, sum2, sum3;
207 uint32x4_t sum4, sum5, sum6, sum7;
208 int32x4_t dst0, dst1, dst2, dst3;
209 int32x4_t dst4, dst5, dst6, dst7;
210 uint16x8_t tmp_u0, tmp_u1, tmp_u2, tmp_u3;
211 const int16x8_t zero = vdupq_n_s16(0);
212
213 if (use_dist_wtd_comp_avg) {
214 const int32x4_t sub_const_vec = vmovl_s16(sub_const);
215 const int32x4_t round_bits_vec = vdupq_n_s32(-(int32_t)round_bits);
216
217 sum0 = vmull_n_u16(vget_low_u16(res0), fwd_offset);
218 sum0 = vmlal_n_u16(sum0, vget_low_u16(d0), bck_offset);
219 sum1 = vmull_n_u16(vget_low_u16(res1), fwd_offset);
220 sum1 = vmlal_n_u16(sum1, vget_low_u16(d1), bck_offset);
221 sum0 = vshrq_n_u32(sum0, DIST_PRECISION_BITS);
222 sum1 = vshrq_n_u32(sum1, DIST_PRECISION_BITS);
223
224 sum2 = vmull_n_u16(vget_high_u16(res0), fwd_offset);
225 sum2 = vmlal_n_u16(sum2, vget_high_u16(d0), bck_offset);
226 sum3 = vmull_n_u16(vget_high_u16(res1), fwd_offset);
227 sum3 = vmlal_n_u16(sum3, vget_high_u16(d1), bck_offset);
228 sum2 = vshrq_n_u32(sum2, DIST_PRECISION_BITS);
229 sum3 = vshrq_n_u32(sum3, DIST_PRECISION_BITS);
230
231 sum4 = vmull_n_u16(vget_low_u16(res2), fwd_offset);
232 sum4 = vmlal_n_u16(sum4, vget_low_u16(d2), bck_offset);
233 sum5 = vmull_n_u16(vget_low_u16(res3), fwd_offset);
234 sum5 = vmlal_n_u16(sum5, vget_low_u16(d3), bck_offset);
235 sum4 = vshrq_n_u32(sum4, DIST_PRECISION_BITS);
236 sum5 = vshrq_n_u32(sum5, DIST_PRECISION_BITS);
237
238 sum6 = vmull_n_u16(vget_high_u16(res2), fwd_offset);
239 sum6 = vmlal_n_u16(sum6, vget_high_u16(d2), bck_offset);
240 sum7 = vmull_n_u16(vget_high_u16(res3), fwd_offset);
241 sum7 = vmlal_n_u16(sum7, vget_high_u16(d3), bck_offset);
242 sum6 = vshrq_n_u32(sum6, DIST_PRECISION_BITS);
243 sum7 = vshrq_n_u32(sum7, DIST_PRECISION_BITS);
244
245 dst0 = vsubq_s32(vreinterpretq_s32_u32(sum0), sub_const_vec);
246 dst1 = vsubq_s32(vreinterpretq_s32_u32(sum1), sub_const_vec);
247 dst2 = vsubq_s32(vreinterpretq_s32_u32(sum2), sub_const_vec);
248 dst3 = vsubq_s32(vreinterpretq_s32_u32(sum3), sub_const_vec);
249 dst4 = vsubq_s32(vreinterpretq_s32_u32(sum4), sub_const_vec);
250 dst5 = vsubq_s32(vreinterpretq_s32_u32(sum5), sub_const_vec);
251 dst6 = vsubq_s32(vreinterpretq_s32_u32(sum6), sub_const_vec);
252 dst7 = vsubq_s32(vreinterpretq_s32_u32(sum7), sub_const_vec);
253
254 dst0 = vqrshlq_s32(dst0, round_bits_vec);
255 dst1 = vqrshlq_s32(dst1, round_bits_vec);
256 dst2 = vqrshlq_s32(dst2, round_bits_vec);
257 dst3 = vqrshlq_s32(dst3, round_bits_vec);
258 dst4 = vqrshlq_s32(dst4, round_bits_vec);
259 dst5 = vqrshlq_s32(dst5, round_bits_vec);
260 dst6 = vqrshlq_s32(dst6, round_bits_vec);
261 dst7 = vqrshlq_s32(dst7, round_bits_vec);
262
263 tmp0 = vqmovn_s32(dst0);
264 tmp1 = vqmovn_s32(dst1);
265 tmp2 = vqmovn_s32(dst2);
266 tmp3 = vqmovn_s32(dst3);
267 tmp4 = vqmovn_s32(dst4);
268 tmp5 = vqmovn_s32(dst5);
269 tmp6 = vqmovn_s32(dst6);
270 tmp7 = vqmovn_s32(dst7);
271
272 f0 = vcombine_s16(tmp0, tmp2);
273 f1 = vcombine_s16(tmp1, tmp3);
274 f2 = vcombine_s16(tmp4, tmp6);
275 f3 = vcombine_s16(tmp5, tmp7);
276
277 f0 = vmaxq_s16(f0, zero);
278 f1 = vmaxq_s16(f1, zero);
279 f2 = vmaxq_s16(f2, zero);
280 f3 = vmaxq_s16(f3, zero);
281
282 *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0));
283 *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1));
284 *t2 = vqmovn_u16(vreinterpretq_u16_s16(f2));
285 *t3 = vqmovn_u16(vreinterpretq_u16_s16(f3));
286
287 } else {
288 const int16x8_t sub_const_vec = vcombine_s16(sub_const, sub_const);
289 const int16x8_t round_bits_vec = vdupq_n_s16(-round_bits);
290
291 tmp_u0 = vhaddq_u16(res0, d0);
292 tmp_u1 = vhaddq_u16(res1, d1);
293 tmp_u2 = vhaddq_u16(res2, d2);
294 tmp_u3 = vhaddq_u16(res3, d3);
295
296 f0 = vsubq_s16(vreinterpretq_s16_u16(tmp_u0), sub_const_vec);
297 f1 = vsubq_s16(vreinterpretq_s16_u16(tmp_u1), sub_const_vec);
298 f2 = vsubq_s16(vreinterpretq_s16_u16(tmp_u2), sub_const_vec);
299 f3 = vsubq_s16(vreinterpretq_s16_u16(tmp_u3), sub_const_vec);
300
301 f0 = vqrshlq_s16(f0, round_bits_vec);
302 f1 = vqrshlq_s16(f1, round_bits_vec);
303 f2 = vqrshlq_s16(f2, round_bits_vec);
304 f3 = vqrshlq_s16(f3, round_bits_vec);
305
306 f0 = vmaxq_s16(f0, zero);
307 f1 = vmaxq_s16(f1, zero);
308 f2 = vmaxq_s16(f2, zero);
309 f3 = vmaxq_s16(f3, zero);
310
311 *t0 = vqmovn_u16(vreinterpretq_u16_s16(f0));
312 *t1 = vqmovn_u16(vreinterpretq_u16_s16(f1));
313 *t2 = vqmovn_u16(vreinterpretq_u16_s16(f2));
314 *t3 = vqmovn_u16(vreinterpretq_u16_s16(f3));
315 }
316 }
317
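// Horizontal pass of the compound 2D convolution: applies the 8-tap x filter
// to im_h rows of the 8-bit source and writes 16-bit intermediate values to
// im_block. The w == 4 path produces 4 outputs per row; the general path works
// on 8-pixel blocks (8 rows at a time on AArch64, one row at a time otherwise).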
318 static INLINE void dist_wtd_convolve_2d_horiz_neon(
319 const uint8_t *src, int src_stride, int16_t *im_block, const int im_stride,
320 int16_t *x_filter_tmp, const int im_h, int w, const int round_0) {
321 const int bd = 8;
322 const uint8_t *s;
323 int16_t *dst_ptr;
324 int dst_stride;
325 int width, height;
326
327 dst_ptr = im_block;
328 dst_stride = im_stride;
329 height = im_h;
330 width = w;
331
332 if (w == 4) {
333 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
334 int16x8_t tt0;
335 uint8x8_t t0;
336
337 const int16x4_t horiz_const = vdup_n_s16((1 << (bd + FILTER_BITS - 2)));
338 const int16x4_t shift_round_0 = vdup_n_s16(-(round_0));
339
340 #if defined(__aarch64__)
341 int16x4_t s8, s9, s10, d1, d2, d3;
342 int16x8_t tt1, tt2, tt3;
343 uint8x8_t t1, t2, t3;
344 #endif
345 do {
346 s = src;
347 __builtin_prefetch(s + 0 * src_stride);
348 #if defined(__aarch64__)
349 __builtin_prefetch(s + 1 * src_stride);
350 __builtin_prefetch(s + 2 * src_stride);
351 __builtin_prefetch(s + 3 * src_stride);
352
353 load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
354 transpose_u8_8x4(&t0, &t1, &t2, &t3);
355 tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
356 tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
357 tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
358 tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
359 s0 = vget_low_s16(tt0);
360 s1 = vget_low_s16(tt1);
361 s2 = vget_low_s16(tt2);
362 s3 = vget_low_s16(tt3);
363 s4 = vget_high_s16(tt0);
364 s5 = vget_high_s16(tt1);
365 s6 = vget_high_s16(tt2);
366 __builtin_prefetch(dst_ptr + 0 * dst_stride);
367 __builtin_prefetch(dst_ptr + 1 * dst_stride);
368 __builtin_prefetch(dst_ptr + 2 * dst_stride);
369 __builtin_prefetch(dst_ptr + 3 * dst_stride);
370 s += 7;
371
372 load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
373 transpose_u8_8x4(&t0, &t1, &t2, &t3);
374 tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
375 tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
376 tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
377 tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
378 s7 = vget_low_s16(tt0);
379 s8 = vget_low_s16(tt1);
380 s9 = vget_low_s16(tt2);
381 s10 = vget_low_s16(tt3);
382
383 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
384 horiz_const, shift_round_0);
385 d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
386 horiz_const, shift_round_0);
387 d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
388 horiz_const, shift_round_0);
389 d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
390 horiz_const, shift_round_0);
391
392 transpose_s16_4x4d(&d0, &d1, &d2, &d3);
393
394 vst1_s16((dst_ptr + 0 * dst_stride), d0);
395 vst1_s16((dst_ptr + 1 * dst_stride), d1);
396 vst1_s16((dst_ptr + 2 * dst_stride), d2);
397 vst1_s16((dst_ptr + 3 * dst_stride), d3);
398
399 src += 4 * src_stride;
400 dst_ptr += 4 * dst_stride;
401 height -= 4;
402 #else
403 t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7
404 tt0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
405 s0 = vget_low_s16(tt0); // a0 a1 a2 a3
406 s4 = vget_high_s16(tt0); // a4 a5 a6 a7
407 __builtin_prefetch(dst_ptr);
408 s += 8;
409 t0 = vld1_u8(s); // a8 a9 a10 a11
410
411 // a8 a9 a10 a11
412 s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
413
414 s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4
415 s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5
416 s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6
417 s5 = vext_s16(s4, s7, 1); // a5 a6 a7 a8
418 s6 = vext_s16(s4, s7, 2); // a6 a7 a8 a9
419 s7 = vext_s16(s4, s7, 3); // a7 a8 a9 a10
420
421 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
422 horiz_const, shift_round_0);
423
424 vst1_s16(dst_ptr, d0);
425
426 src += src_stride;
427 dst_ptr += dst_stride;
428 height -= 1;
429 #endif
430 } while (height > 0);
431 } else {
432 int16_t *d_tmp;
433 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
434 int16x8_t res0;
435 uint8x8_t t0;
436
437 const int16x8_t horiz_const = vdupq_n_s16((1 << (bd + FILTER_BITS - 2)));
438 const int16x8_t shift_round_0 = vdupq_n_s16(-(round_0));
439 do {
440 #if defined(__aarch64__)
441 uint8x8_t t1, t2, t3, t4, t5, t6, t7;
442 int16x8_t s8, s9, s10, s11, s12, s13, s14;
443 int16x8_t res1, res2, res3, res4, res5, res6, res7;
444 __builtin_prefetch(src + 0 * src_stride);
445 __builtin_prefetch(src + 1 * src_stride);
446 __builtin_prefetch(src + 2 * src_stride);
447 __builtin_prefetch(src + 3 * src_stride);
448 __builtin_prefetch(src + 4 * src_stride);
449 __builtin_prefetch(src + 5 * src_stride);
450 __builtin_prefetch(src + 6 * src_stride);
451 __builtin_prefetch(src + 7 * src_stride);
452 load_u8_8x8(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
453 transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
454 s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
455 s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
456 s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
457 s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
458 s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
459 s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
460 s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
461
462 width = w;
463 s = src + 7;
464 d_tmp = dst_ptr;
465 __builtin_prefetch(dst_ptr + 0 * dst_stride);
466 __builtin_prefetch(dst_ptr + 1 * dst_stride);
467 __builtin_prefetch(dst_ptr + 2 * dst_stride);
468 __builtin_prefetch(dst_ptr + 3 * dst_stride);
469 __builtin_prefetch(dst_ptr + 4 * dst_stride);
470 __builtin_prefetch(dst_ptr + 5 * dst_stride);
471 __builtin_prefetch(dst_ptr + 6 * dst_stride);
472 __builtin_prefetch(dst_ptr + 7 * dst_stride);
473
474 do {
475 load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
476 transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
477 s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
478 s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
479 s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
480 s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
481 s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
482 s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
483 s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
484 s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
485
486 res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
487 horiz_const, shift_round_0);
488 res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
489 horiz_const, shift_round_0);
490 res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
491 horiz_const, shift_round_0);
492 res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
493 horiz_const, shift_round_0);
494 res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter_tmp,
495 horiz_const, shift_round_0);
496 res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
497 x_filter_tmp, horiz_const, shift_round_0);
498 res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
499 x_filter_tmp, horiz_const, shift_round_0);
500 res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
501 x_filter_tmp, horiz_const, shift_round_0);
502
503 transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
504 &res7);
505
506 store_s16_8x8(d_tmp, dst_stride, res0, res1, res2, res3, res4, res5,
507 res6, res7);
508 s0 = s8;
509 s1 = s9;
510 s2 = s10;
511 s3 = s11;
512 s4 = s12;
513 s5 = s13;
514 s6 = s14;
515 s += 8;
516 d_tmp += 8;
517 width -= 8;
518 } while (width > 0);
519 src += 8 * src_stride;
520 dst_ptr += 8 * dst_stride;
521 height -= 8;
522 #else
523 int16x8_t temp_0;
524 t0 = vld1_u8(src);
525 s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
526
527 width = w;
528 s = src + 8;
529 d_tmp = dst_ptr;
530 __builtin_prefetch(dst_ptr);
531
532 do {
533 t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
534 s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
535 temp_0 = s0;
536 s0 = s7;
537
538 s1 = vextq_s16(temp_0, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
539 s2 = vextq_s16(temp_0, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
540 s3 = vextq_s16(temp_0, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
541 s4 = vextq_s16(temp_0, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
542 s5 = vextq_s16(temp_0, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
543 s6 = vextq_s16(temp_0, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
544 s7 = vextq_s16(temp_0, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
545
546 res0 = convolve8_8x8_s16(temp_0, s1, s2, s3, s4, s5, s6, s7,
547 x_filter_tmp, horiz_const, shift_round_0);
548 vst1q_s16(d_tmp, res0);
549
550 s += 8;
551 d_tmp += 8;
552 width -= 8;
553 } while (width > 0);
554 src += src_stride;
555 dst_ptr += dst_stride;
556 height -= 1;
557 #endif
558 } while (height > 0);
559 }
560 }
561
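// Vertical pass of the compound 2D convolution: applies the 8-tap y filter to
// the 16-bit intermediate block, four columns at a time. The result is either
// stored to the CONV_BUF_TYPE compound buffer or, when do_average is set,
// averaged with it and written out as 8-bit pixels.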
562 static INLINE void dist_wtd_convolve_2d_vert_neon(
563 int16_t *im_block, const int im_stride, uint8_t *dst8, int dst8_stride,
564 ConvolveParams *conv_params, const int16_t *y_filter, int h, int w) {
565 uint8_t *dst_u8_ptr, *d_u8;
566 CONV_BUF_TYPE *dst_ptr, *dst;
567 int16_t *src_ptr, *s;
568 uint16_t *d;
569
570 const int bd = 8;
571 int height;
572 int dst_stride = conv_params->dst_stride;
573 const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
574 const int16_t sub_const = (1 << (offset_bits - conv_params->round_1)) +
575 (1 << (offset_bits - conv_params->round_1 - 1));
576
577 const int16_t round_bits =
578 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
579 const int offset = bd + 2 * FILTER_BITS - conv_params->round_0;
580 const int32x4_t round_shift_vec = vdupq_n_s32(-(conv_params->round_1));
581 const int32x4_t offset_const = vdupq_n_s32(1 << offset);
582 const int16x4_t sub_const_vec = vdup_n_s16(sub_const);
583 const uint16_t fwd_offset = conv_params->fwd_offset;
584 const uint16_t bck_offset = conv_params->bck_offset;
585 const int do_average = conv_params->do_average;
586 const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
587
588 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
589 uint16x4_t res4, d0;
590 uint8x8_t t0;
591
592 #if defined(__aarch64__)
593 int16x4_t s8, s9, s10;
594 uint16x4_t res5, res6, res7, d1, d2, d3;
595 uint8x8_t t1;
596 #endif
597
598 dst = conv_params->dst;
599 src_ptr = im_block;
600 dst_u8_ptr = dst8;
601 dst_ptr = dst;
602 height = h;
603
604 do {
605 d = dst_ptr;
606 d_u8 = dst_u8_ptr;
607 s = src_ptr;
608 height = h;
609
610 __builtin_prefetch(s + 0 * im_stride);
611 __builtin_prefetch(s + 1 * im_stride);
612 __builtin_prefetch(s + 2 * im_stride);
613 __builtin_prefetch(s + 3 * im_stride);
614 __builtin_prefetch(s + 4 * im_stride);
615 __builtin_prefetch(s + 5 * im_stride);
616 __builtin_prefetch(s + 6 * im_stride);
617 __builtin_prefetch(s + 7 * im_stride);
618
619 load_s16_4x8(s, im_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
620 s += (7 * im_stride);
621
622 do {
623 #if defined(__aarch64__)
624 load_s16_4x4(s, im_stride, &s7, &s8, &s9, &s10);
625 s += (im_stride << 2);
626
627 __builtin_prefetch(d + 0 * dst_stride);
628 __builtin_prefetch(d + 1 * dst_stride);
629 __builtin_prefetch(d + 2 * dst_stride);
630 __builtin_prefetch(d + 3 * dst_stride);
631
632 __builtin_prefetch(d_u8 + 4 * dst8_stride);
633 __builtin_prefetch(d_u8 + 5 * dst8_stride);
634 __builtin_prefetch(d_u8 + 6 * dst8_stride);
635 __builtin_prefetch(d_u8 + 7 * dst8_stride);
636
637 d0 = convolve8_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
638 round_shift_vec, offset_const);
639 d1 = convolve8_4x4_s32(s1, s2, s3, s4, s5, s6, s7, s8, y_filter,
640 round_shift_vec, offset_const);
641 d2 = convolve8_4x4_s32(s2, s3, s4, s5, s6, s7, s8, s9, y_filter,
642 round_shift_vec, offset_const);
643 d3 = convolve8_4x4_s32(s3, s4, s5, s6, s7, s8, s9, s10, y_filter,
644 round_shift_vec, offset_const);
645
646 if (do_average) {
647 load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
648 d += (dst_stride << 2);
649
650 compute_avg_4x4(res4, res5, res6, res7, d0, d1, d2, d3, fwd_offset,
651 bck_offset, sub_const_vec, round_bits,
652 use_dist_wtd_comp_avg, &t0, &t1);
653
654 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
655 d_u8 += dst8_stride;
656 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 1);
657 d_u8 += dst8_stride;
658 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 0);
659 d_u8 += dst8_stride;
660 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 1);
661 d_u8 += dst8_stride;
662
663 } else {
664 store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
665 d += (dst_stride << 2);
666 }
667 s0 = s4;
668 s1 = s5;
669 s2 = s6;
670 s3 = s7;
671 s4 = s8;
672 s5 = s9;
673 s6 = s10;
674 height -= 4;
675 #else
676 s7 = vld1_s16(s);
677 s += (im_stride);
678
679 __builtin_prefetch(d + 0 * dst_stride);
680 __builtin_prefetch(d_u8 + 0 * dst8_stride);
681
682 d0 = convolve8_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
683 round_shift_vec, offset_const);
684
685 if (do_average) {
686 res4 = vld1_u16(d);
687 d += (dst_stride);
688
689 compute_avg_4x1(res4, d0, fwd_offset, bck_offset, sub_const_vec,
690 round_bits, use_dist_wtd_comp_avg, &t0);
691
692 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
693 d_u8 += dst8_stride;
694
695 } else {
696 vst1_u16(d, d0);
697 d += (dst_stride);
698 }
699 s0 = s1;
700 s1 = s2;
701 s2 = s3;
702 s3 = s4;
703 s4 = s5;
704 s5 = s6;
705 s6 = s7;
706 height--;
707 #endif
708 } while (height > 0);
709 src_ptr += 4;
710 dst_ptr += 4;
711 dst_u8_ptr += 4;
712 w -= 4;
713 } while (w > 0);
714 }
715
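// Distance-weighted compound 2D convolution for 8-bit input: horizontal pass
// into im_block, then the vertical pass above with optional compound averaging.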
716 void av1_dist_wtd_convolve_2d_neon(const uint8_t *src, int src_stride,
717 uint8_t *dst8, int dst8_stride, int w, int h,
718 const InterpFilterParams *filter_params_x,
719 const InterpFilterParams *filter_params_y,
720 const int subpel_x_qn, const int subpel_y_qn,
721 ConvolveParams *conv_params) {
722 assert(!(w % 4));
723 assert(!(h % 4));
724
725 DECLARE_ALIGNED(16, int16_t,
726 im_block[(MAX_SB_SIZE + HORIZ_EXTRA_ROWS) * MAX_SB_SIZE]);
727
728 const int im_h = h + filter_params_y->taps - 1;
729 const int im_stride = MAX_SB_SIZE;
730 const int vert_offset = filter_params_y->taps / 2 - 1;
731 const int horiz_offset = filter_params_x->taps / 2 - 1;
732 const int round_0 = conv_params->round_0 - 1;
733 const uint8_t *src_ptr = src - vert_offset * src_stride - horiz_offset;
734 const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
735 filter_params_x, subpel_x_qn & SUBPEL_MASK);
736 const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
737 filter_params_y, subpel_y_qn & SUBPEL_MASK);
738
739 int16_t x_filter_tmp[8];
740 int16x8_t filter_x_coef = vld1q_s16(x_filter);
741
742   // The filter coefficients are all even, so halve them (shift right by one)
743   // to reduce the intermediate precision requirements.
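  // Since every tap f[i] is even, sum(s[i] * f[i]) == 2 * sum(s[i] * (f[i] >> 1)),
  // and the lost factor of two is compensated by shifting down one bit less in
  // the rounding that follows (round_0 - 1 above).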
744 filter_x_coef = vshrq_n_s16(filter_x_coef, 1);
745 vst1q_s16(&x_filter_tmp[0], filter_x_coef);
746
747 dist_wtd_convolve_2d_horiz_neon(src_ptr, src_stride, im_block, im_stride,
748 x_filter_tmp, im_h, w, round_0);
749
750 dist_wtd_convolve_2d_vert_neon(im_block, im_stride, dst8, dst8_stride,
751 conv_params, y_filter, h, w);
752 }
753
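// Compound copy path (both filters at the integer position): source pixels are
// shifted up by 'bits' and offset before being stored to, or averaged with,
// the compound buffer.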
754 void av1_dist_wtd_convolve_2d_copy_neon(
755 const uint8_t *src, int src_stride, uint8_t *dst8, int dst8_stride, int w,
756 int h, const InterpFilterParams *filter_params_x,
757 const InterpFilterParams *filter_params_y, const int subpel_x_qn,
758 const int subpel_y_qn, ConvolveParams *conv_params) {
759 uint8x8_t res0_8, res1_8, res2_8, res3_8, tmp_shift0, tmp_shift1, tmp_shift2,
760 tmp_shift3;
761 uint16x8_t res_q0, res_q1, res_q2, res_q3, tmp_q0, tmp_q1, tmp_q2, tmp_q3;
762 uint16x4_t tmp4, tmp5, tmp6, tmp7, res4, res5, res6, res7;
763 const uint8_t *src1, *src2;
764 uint8_t *dst8_1;
765 CONV_BUF_TYPE *dst = conv_params->dst, *dst_1, *dst_2;
766 const int dst_stride = conv_params->dst_stride;
767 int x, y;
768 const int16_t bits =
769 FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;
770 const int bd = 8;
771 const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
772 const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
773 (1 << (offset_bits - conv_params->round_1 - 1));
774 const int16x4_t sub_const_vec = vdup_n_s16((int16_t)round_offset);
775 const uint16x8_t dup_round_offset16x8 = vdupq_n_u16((uint16_t)round_offset);
776 const int16x4_t dup_bits16x4 = vdup_n_s16(bits);
777 const int16x8_t dup_bits16x8 = vdupq_n_s16(bits);
778
779 (void)filter_params_x;
780 (void)filter_params_y;
781 (void)subpel_x_qn;
782 (void)subpel_y_qn;
783
784 if (!(w & 0x07)) {
785 for (y = 0; y < (h >> 2); ++y) {
786 src1 = src;
787 dst8_1 = dst8;
788 dst_1 = dst;
789 for (x = 0; x < (w >> 3); ++x) {
790 src2 = src1;
791 load_u8_8x4(src2, src_stride, &res0_8, &res1_8, &res2_8, &res3_8);
792
793 res_q0 = vaddq_u16(vshlq_u16(vmovl_u8(res0_8), dup_bits16x8),
794 dup_round_offset16x8);
795 res_q1 = vaddq_u16(vshlq_u16(vmovl_u8(res1_8), dup_bits16x8),
796 dup_round_offset16x8);
797 res_q2 = vaddq_u16(vshlq_u16(vmovl_u8(res2_8), dup_bits16x8),
798 dup_round_offset16x8);
799 res_q3 = vaddq_u16(vshlq_u16(vmovl_u8(res3_8), dup_bits16x8),
800 dup_round_offset16x8);
801
802 if (conv_params->do_average) {
803 dst_2 = dst_1;
804 load_u16_8x4(dst_2, dst_stride, &tmp_q0, &tmp_q1, &tmp_q2, &tmp_q3);
805
806 compute_avg_8x4(tmp_q0, tmp_q1, tmp_q2, tmp_q3, res_q0, res_q1,
807 res_q2, res_q3, conv_params->fwd_offset,
808 conv_params->bck_offset, sub_const_vec, bits,
809 conv_params->use_dist_wtd_comp_avg, &tmp_shift0,
810 &tmp_shift1, &tmp_shift2, &tmp_shift3);
811
812 vst1_u8(dst8_1 + (0 * dst8_stride), tmp_shift0);
813 vst1_u8(dst8_1 + (1 * dst8_stride), tmp_shift1);
814 vst1_u8(dst8_1 + (2 * dst8_stride), tmp_shift2);
815 vst1_u8(dst8_1 + (3 * dst8_stride), tmp_shift3);
816
817 } else {
818 vst1q_u16(dst_1 + (0 * dst_stride), res_q0);
819 vst1q_u16(dst_1 + (1 * dst_stride), res_q1);
820 vst1q_u16(dst_1 + (2 * dst_stride), res_q2);
821 vst1q_u16(dst_1 + (3 * dst_stride), res_q3);
822 }
823 src1 = src1 + 8;
824 dst_1 = dst_1 + 8;
825 dst8_1 = dst8_1 + 8;
826 }
827 src += src_stride * 4;
828 dst8 += dst8_stride * 4;
829 dst += dst_stride * 4;
830 }
831 } else if (!(w & 0x03)) {
832 for (y = 0; y < (h >> 2); ++y) {
833 src1 = src;
834 dst8_1 = dst8;
835 dst_1 = dst;
836
837 load_u8_8x4(src1, src_stride, &res0_8, &res1_8, &res2_8, &res3_8);
838
839 res4 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res0_8)), dup_bits16x4),
840 vreinterpret_u16_s16(sub_const_vec));
841 res5 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res1_8)), dup_bits16x4),
842 vreinterpret_u16_s16(sub_const_vec));
843 res6 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res2_8)), dup_bits16x4),
844 vreinterpret_u16_s16(sub_const_vec));
845 res7 = vadd_u16(vshl_u16(vget_low_u16(vmovl_u8(res3_8)), dup_bits16x4),
846 vreinterpret_u16_s16(sub_const_vec));
847 if (conv_params->do_average) {
848 load_u16_4x4(dst_1, dst_stride, &tmp4, &tmp5, &tmp6, &tmp7);
849
850 compute_avg_4x4(tmp4, tmp5, tmp6, tmp7, res4, res5, res6, res7,
851 conv_params->fwd_offset, conv_params->bck_offset,
852 sub_const_vec, bits, conv_params->use_dist_wtd_comp_avg,
853 &tmp_shift0, &tmp_shift1);
854
855 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 0);
856 dst8_1 += dst8_stride;
857 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift0), 1);
858 dst8_1 += dst8_stride;
859 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 0);
860 dst8_1 += dst8_stride;
861 vst1_lane_u32((uint32_t *)(dst8_1), vreinterpret_u32_u8(tmp_shift1), 1);
862
863 } else {
864 vst1_u16(dst_1, res4);
865 dst_1 += dst_stride;
866 vst1_u16(dst_1, res5);
867 dst_1 += dst_stride;
868 vst1_u16(dst_1, res6);
869 dst_1 += dst_stride;
870 vst1_u16(dst_1, res7);
871 }
872 src += src_stride * 4;
873 dst += dst_stride * 4;
874 dst8 += dst8_stride * 4;
875 }
876 }
877 }
878
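// Horizontal-only compound convolution (the vertical filter is at an integer
// position).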
879 void av1_dist_wtd_convolve_x_neon(const uint8_t *src, int src_stride,
880 uint8_t *dst8, int dst8_stride, int w, int h,
881 const InterpFilterParams *filter_params_x,
882 const InterpFilterParams *filter_params_y,
883 const int subpel_x_qn, const int subpel_y_qn,
884 ConvolveParams *conv_params) {
885 assert(!(w % 4));
886 assert(!(h % 4));
887
888 CONV_BUF_TYPE *dst = conv_params->dst;
889 int dst_stride = conv_params->dst_stride;
890 const int horiz_offset = filter_params_x->taps / 2 - 1;
891 const int bits = FILTER_BITS - conv_params->round_1;
892 const int bd = 8;
893 const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
894 const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
895 (1 << (offset_bits - conv_params->round_1 - 1));
896 const int round_bits =
897 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
898 const uint16_t fwd_offset = conv_params->fwd_offset;
899 const uint16_t bck_offset = conv_params->bck_offset;
900 const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
901
902 (void)filter_params_y;
903 (void)subpel_y_qn;
904
905 // horizontal filter
906 const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
907 filter_params_x, subpel_x_qn & SUBPEL_MASK);
908
909 const uint8_t *src_ptr = src - horiz_offset;
910
911 int16_t x_filter_tmp[8];
912 int16x8_t filter_x_coef = vld1q_s16(x_filter);
913
914   // The filter coefficients are all even, so halve them (shift right by one)
915   // to reduce the intermediate precision requirements.
916 filter_x_coef = vshrq_n_s16(filter_x_coef, 1);
917 vst1q_s16(&x_filter_tmp[0], filter_x_coef);
918
919 const uint8_t *s;
920 uint8_t *d_u8;
921 uint8_t *dst_u8_ptr;
922 CONV_BUF_TYPE *d, *dst_ptr;
923 int width, height;
924 uint8x8_t t0;
925 #if defined(__aarch64__)
926 uint8x8_t t1, t2, t3, t4, t5, t6, t7;
927 #endif
928 s = src_ptr;
929 dst_ptr = dst;
930 dst_u8_ptr = dst8;
931 width = w;
932 height = h;
933
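  // Blocks with w == 4 or h == 4 are filtered four output pixels at a time;
  // all other sizes use the 8-pixel-wide path below.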
934 if ((w == 4) || (h == 4)) {
935 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
936 int16x8_t tt0;
937 uint16x4_t res4;
938 #if defined(__aarch64__)
939 int16x4_t s8, s9, s10, d1, d2, d3;
940 int16x8_t tt1, tt2, tt3;
941 uint16x4_t res5, res6, res7;
942 uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0);
943 int16x8_t u0, u1;
944 #else
945 int16x4_t temp_0;
946 #endif
947 const int16x4_t zero = vdup_n_s16(0);
948 const int16x4_t round_offset_vec = vdup_n_s16(round_offset);
949 const int16x4_t shift_round_0 = vdup_n_s16(-conv_params->round_0 + 1);
950 const int16x4_t horiz_const = vdup_n_s16(bits);
951 do {
952 s = src_ptr;
953 d = dst_ptr;
954 d_u8 = dst_u8_ptr;
955 width = w;
956 __builtin_prefetch(s + 0 * src_stride);
957 #if defined(__aarch64__)
958 __builtin_prefetch(s + 1 * src_stride);
959 __builtin_prefetch(s + 2 * src_stride);
960 __builtin_prefetch(s + 3 * src_stride);
961
962 load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
963 transpose_u8_8x4(&t0, &t1, &t2, &t3);
964 tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
965 tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
966 tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
967 tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
968 s0 = vget_low_s16(tt0);
969 s1 = vget_low_s16(tt1);
970 s2 = vget_low_s16(tt2);
971 s3 = vget_low_s16(tt3);
972 s4 = vget_high_s16(tt0);
973 s5 = vget_high_s16(tt1);
974 s6 = vget_high_s16(tt2);
975 __builtin_prefetch(d + 0 * dst_stride);
976 __builtin_prefetch(d + 1 * dst_stride);
977 __builtin_prefetch(d + 2 * dst_stride);
978 __builtin_prefetch(d + 3 * dst_stride);
979 s += 7;
980 do {
981 load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1);
982 t0 = vreinterpret_u8_u32(tu0);
983 t1 = vreinterpret_u8_u32(tu1);
984
985 transpose_u8_4x4(&t0, &t1);
986 u0 = vreinterpretq_s16_u16(vmovl_u8(t0));
987 u1 = vreinterpretq_s16_u16(vmovl_u8(t1));
988
989 s7 = vget_low_s16(u0);
990 s8 = vget_low_s16(u1);
991 s9 = vget_high_s16(u0);
992 s10 = vget_high_s16(u1);
993
994 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
995 zero, shift_round_0);
996 d0 = vrshl_s16(d0, horiz_const);
997 d0 = vadd_s16(d0, round_offset_vec);
998 d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
999 zero, shift_round_0);
1000 d1 = vrshl_s16(d1, horiz_const);
1001 d1 = vadd_s16(d1, round_offset_vec);
1002 d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
1003 zero, shift_round_0);
1004 d2 = vrshl_s16(d2, horiz_const);
1005 d2 = vadd_s16(d2, round_offset_vec);
1006 d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
1007 zero, shift_round_0);
1008 d3 = vrshl_s16(d3, horiz_const);
1009 d3 = vadd_s16(d3, round_offset_vec);
1010
1011 transpose_s16_4x4d(&d0, &d1, &d2, &d3);
1012
1013 if (conv_params->do_average) {
1014 __builtin_prefetch(d + 0 * dst_stride);
1015 __builtin_prefetch(d + 1 * dst_stride);
1016 __builtin_prefetch(d + 2 * dst_stride);
1017 __builtin_prefetch(d + 3 * dst_stride);
1018
1019 __builtin_prefetch(d_u8 + 0 * dst8_stride);
1020 __builtin_prefetch(d_u8 + 1 * dst8_stride);
1021 __builtin_prefetch(d_u8 + 2 * dst8_stride);
1022 __builtin_prefetch(d_u8 + 3 * dst8_stride);
1023
1024 load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
1025
1026 compute_avg_4x4(res4, res5, res6, res7, vreinterpret_u16_s16(d0),
1027 vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
1028 vreinterpret_u16_s16(d3), fwd_offset, bck_offset,
1029 round_offset_vec, round_bits, use_dist_wtd_comp_avg,
1030 &t0, &t1);
1031
1032 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0),
1033 0); // 00 01 02 03
1034 vst1_lane_u32((uint32_t *)(d_u8 + dst8_stride),
1035 vreinterpret_u32_u8(t0),
1036 1); // 10 11 12 13
1037 vst1_lane_u32((uint32_t *)(d_u8 + 2 * dst8_stride),
1038 vreinterpret_u32_u8(t1),
1039 0); // 20 21 22 23
1040 vst1_lane_u32((uint32_t *)(d_u8 + 3 * dst8_stride),
1041 vreinterpret_u32_u8(t1),
1042 1); // 30 31 32 33
1043 } else {
1044 store_u16_4x4(d, dst_stride, vreinterpret_u16_s16(d0),
1045 vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
1046 vreinterpret_u16_s16(d3));
1047 }
1048
1049 s0 = s4;
1050 s1 = s5;
1051 s2 = s6;
1052 s3 = s7;
1053 s4 = s8;
1054 s5 = s9;
1055 s6 = s10;
1056
1057 s += 4;
1058 width -= 4;
1059 d += 4;
1060 d_u8 += 4;
1061 } while (width > 0);
1062 src_ptr += (src_stride << 2);
1063 dst_ptr += (dst_stride << 2);
1064 dst_u8_ptr += (dst8_stride << 2);
1065 height -= 4;
1066 #else
1067 t0 = vld1_u8(s); // a0 a1 a2 a3 a4 a5 a6 a7
1068 tt0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
1069 s0 = vget_low_s16(tt0); // a0 a1 a2 a3
1070 s4 = vget_high_s16(tt0); // a4 a5 a6 a7
1071 __builtin_prefetch(d);
1072
1073 s += 8;
1074 do {
1075 t0 = vld1_u8(s); // a8 a9 a10 a11
1076
1077 // a8 a9 a10 a11
1078 s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
1079 temp_0 = s7;
1080 s1 = vext_s16(s0, s4, 1); // a1 a2 a3 a4
1081 s2 = vext_s16(s0, s4, 2); // a2 a3 a4 a5
1082 s3 = vext_s16(s0, s4, 3); // a3 a4 a5 a6
1083 s5 = vext_s16(s4, s7, 1); // a5 a6 a7 a8
1084 s6 = vext_s16(s4, s7, 2); // a6 a7 a8 a9
1085 s7 = vext_s16(s4, s7, 3); // a7 a8 a9 a10
1086
1087 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
1088 zero, shift_round_0);
1089 d0 = vrshl_s16(d0, horiz_const);
1090 d0 = vadd_s16(d0, round_offset_vec);
1091 s0 = s4;
1092 s4 = temp_0;
1093 if (conv_params->do_average) {
1094 __builtin_prefetch(d);
1095 __builtin_prefetch(d_u8);
1096
1097 res4 = vld1_u16(d);
1098
1099 compute_avg_4x1(res4, vreinterpret_u16_s16(d0), fwd_offset,
1100 bck_offset, round_offset_vec, round_bits,
1101 use_dist_wtd_comp_avg, &t0);
1102
1103 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0),
1104 0); // 00 01 02 03
1105 } else {
1106 vst1_u16(d, vreinterpret_u16_s16(d0));
1107 }
1108
1109 s += 4;
1110 width -= 4;
1111 d += 4;
1112 d_u8 += 4;
1113 } while (width > 0);
1114 src_ptr += (src_stride);
1115 dst_ptr += (dst_stride);
1116 dst_u8_ptr += (dst8_stride);
1117 height--;
1118 #endif
1119 } while (height > 0);
1120 } else {
1121 CONV_BUF_TYPE *d_tmp;
1122 uint8_t *d_u8_tmp;
1123 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
1124 int16x8_t res0;
1125 uint16x8_t res8;
1126 const int16x8_t round_offset128 = vdupq_n_s16(round_offset);
1127 const int16x4_t round_offset64 = vdup_n_s16(round_offset);
1128 const int16x8_t shift_round_0 = vdupq_n_s16(-conv_params->round_0 + 1);
1129 const int16x8_t horiz_const = vdupq_n_s16(bits);
1130 const int16x8_t zero = vdupq_n_s16(0);
1131
1132 d = dst_ptr = dst;
1133 d_u8 = dst_u8_ptr = dst8;
1134 do {
1135 #if defined(__aarch64__)
1136 int16x8_t s11, s12, s13, s14;
1137 int16x8_t s8, s9, s10;
1138 int16x8_t res1, res2, res3, res4, res5, res6, res7;
1139 uint16x8_t res9, res10, res11;
1140 __builtin_prefetch(src_ptr + 0 * src_stride);
1141 __builtin_prefetch(src_ptr + 1 * src_stride);
1142 __builtin_prefetch(src_ptr + 2 * src_stride);
1143 __builtin_prefetch(src_ptr + 3 * src_stride);
1144 __builtin_prefetch(src_ptr + 4 * src_stride);
1145 __builtin_prefetch(src_ptr + 5 * src_stride);
1146 __builtin_prefetch(src_ptr + 6 * src_stride);
1147 __builtin_prefetch(src_ptr + 7 * src_stride);
1148 load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1149 transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1150 s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
1151 s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
1152 s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
1153 s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
1154 s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
1155 s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
1156 s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
1157
1158 width = w;
1159 s = src_ptr + 7;
1160 d = dst_ptr;
1161 d_u8_tmp = dst_u8_ptr;
1162
1163 __builtin_prefetch(dst_ptr + 0 * dst_stride);
1164 __builtin_prefetch(dst_ptr + 1 * dst_stride);
1165 __builtin_prefetch(dst_ptr + 2 * dst_stride);
1166 __builtin_prefetch(dst_ptr + 3 * dst_stride);
1167 __builtin_prefetch(dst_ptr + 4 * dst_stride);
1168 __builtin_prefetch(dst_ptr + 5 * dst_stride);
1169 __builtin_prefetch(dst_ptr + 6 * dst_stride);
1170 __builtin_prefetch(dst_ptr + 7 * dst_stride);
1171
1172 do {
1173 d_u8 = d_u8_tmp;
1174 d_tmp = d;
1175
1176 load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1177 transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1178 s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
1179 s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
1180 s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
1181 s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
1182 s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
1183 s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
1184 s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
1185 s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
1186
1187 res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_tmp,
1188 zero, shift_round_0);
1189
1190 res0 = vrshlq_s16(res0, horiz_const);
1191 res0 = vaddq_s16(res0, round_offset128);
1192
1193 res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter_tmp,
1194 zero, shift_round_0);
1195 res1 = vrshlq_s16(res1, horiz_const);
1196 res1 = vaddq_s16(res1, round_offset128);
1197 res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter_tmp,
1198 zero, shift_round_0);
1199 res2 = vrshlq_s16(res2, horiz_const);
1200 res2 = vaddq_s16(res2, round_offset128);
1201 res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter_tmp,
1202 zero, shift_round_0);
1203 res3 = vrshlq_s16(res3, horiz_const);
1204 res3 = vaddq_s16(res3, round_offset128);
1205 res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter_tmp,
1206 zero, shift_round_0);
1207 res4 = vrshlq_s16(res4, horiz_const);
1208 res4 = vaddq_s16(res4, round_offset128);
1209 res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
1210 x_filter_tmp, zero, shift_round_0);
1211 res5 = vrshlq_s16(res5, horiz_const);
1212 res5 = vaddq_s16(res5, round_offset128);
1213 res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
1214 x_filter_tmp, zero, shift_round_0);
1215 res6 = vrshlq_s16(res6, horiz_const);
1216 res6 = vaddq_s16(res6, round_offset128);
1217 res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
1218 x_filter_tmp, zero, shift_round_0);
1219 res7 = vrshlq_s16(res7, horiz_const);
1220 res7 = vaddq_s16(res7, round_offset128);
1221
1222 transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
1223 &res7);
1224
1225 if (conv_params->do_average) {
1226 load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
1227 d_tmp += (dst_stride << 2);
1228
1229 compute_avg_8x4(res8, res9, res10, res11, vreinterpretq_u16_s16(res0),
1230 vreinterpretq_u16_s16(res1),
1231 vreinterpretq_u16_s16(res2),
1232 vreinterpretq_u16_s16(res3), fwd_offset, bck_offset,
1233 round_offset64, round_bits, use_dist_wtd_comp_avg,
1234 &t0, &t1, &t2, &t3);
1235
1236 store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
1237 d_u8 += (dst8_stride << 2);
1238
1239 load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
1240 d_tmp += (dst_stride << 2);
1241
1242 compute_avg_8x4(res8, res9, res10, res11, vreinterpretq_u16_s16(res4),
1243 vreinterpretq_u16_s16(res5),
1244 vreinterpretq_u16_s16(res6),
1245 vreinterpretq_u16_s16(res7), fwd_offset, bck_offset,
1246 round_offset64, round_bits, use_dist_wtd_comp_avg,
1247 &t0, &t1, &t2, &t3);
1248
1249 store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
1250 d_u8 += (dst8_stride << 2);
1251 } else {
1252 store_u16_8x8(
1253 d_tmp, dst_stride, vreinterpretq_u16_s16(res0),
1254 vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
1255 vreinterpretq_u16_s16(res3), vreinterpretq_u16_s16(res4),
1256 vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
1257 vreinterpretq_u16_s16(res7));
1258 d_tmp += (dst_stride << 3);
1259 }
1260
1261 s0 = s8;
1262 s1 = s9;
1263 s2 = s10;
1264 s3 = s11;
1265 s4 = s12;
1266 s5 = s13;
1267 s6 = s14;
1268 s += 8;
1269 d += 8;
1270 width -= 8;
1271 d_u8_tmp += 8;
1272 } while (width > 0);
1273 src_ptr += 8 * src_stride;
1274 dst_ptr += 8 * dst_stride;
1275 dst_u8_ptr += 8 * dst8_stride;
1276 height -= 8;
1277 #else
1278 int16x8_t temp_0;
1279 __builtin_prefetch(src_ptr);
1280 t0 = vld1_u8(src_ptr);
1281 s0 = vreinterpretq_s16_u16(vmovl_u8(t0)); // a0 a1 a2 a3 a4 a5 a6 a7
1282
1283 width = w;
1284 s = src_ptr + 8;
1285 d = dst_ptr;
1286 d_u8_tmp = dst_u8_ptr;
1287
1288 __builtin_prefetch(dst_ptr);
1289
1290 do {
1291 d_u8 = d_u8_tmp;
1292 d_tmp = d;
1293
1294 t0 = vld1_u8(s); // a8 a9 a10 a11 a12 a13 a14 a15
1295 s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
1296 temp_0 = s0;
1297 s0 = s7;
1298
1299 s1 = vextq_s16(temp_0, s7, 1); // a1 a2 a3 a4 a5 a6 a7 a8
1300 s2 = vextq_s16(temp_0, s7, 2); // a2 a3 a4 a5 a6 a7 a8 a9
1301 s3 = vextq_s16(temp_0, s7, 3); // a3 a4 a5 a6 a7 a8 a9 a10
1302 s4 = vextq_s16(temp_0, s7, 4); // a4 a5 a6 a7 a8 a9 a10 a11
1303 s5 = vextq_s16(temp_0, s7, 5); // a5 a6 a7 a8 a9 a10 a11 a12
1304 s6 = vextq_s16(temp_0, s7, 6); // a6 a7 a8 a9 a10 a11 a12 a13
1305 s7 = vextq_s16(temp_0, s7, 7); // a7 a8 a9 a10 a11 a12 a13 a14
1306
1307 res0 = convolve8_8x8_s16(temp_0, s1, s2, s3, s4, s5, s6, s7,
1308 x_filter_tmp, zero, shift_round_0);
1309
1310 res0 = vrshlq_s16(res0, horiz_const);
1311 res0 = vaddq_s16(res0, round_offset128);
1312
1313 if (conv_params->do_average) {
1314 res8 = vld1q_u16(d_tmp);
1315 d_tmp += (dst_stride);
1316
1317 compute_avg_8x1(res8, vreinterpretq_u16_s16(res0), fwd_offset,
1318 bck_offset, round_offset64, round_bits,
1319 use_dist_wtd_comp_avg, &t0);
1320
1321 vst1_u8(d_u8, t0);
1322 d_u8 += (dst8_stride);
1323 } else {
1324 vst1q_u16(d_tmp, vreinterpretq_u16_s16(res0));
1325 d_tmp += (dst_stride);
1326 }
1327
1328 s += 8;
1329 d += 8;
1330 width -= 8;
1331 d_u8_tmp += 8;
1332 } while (width > 0);
1333 src_ptr += src_stride;
1334 dst_ptr += dst_stride;
1335 dst_u8_ptr += dst8_stride;
1336 height--;
1337 #endif
1338 } while (height > 0);
1339 }
1340 }
1341
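// Vertical-only compound convolution (the horizontal filter is at an integer
// position).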
1342 void av1_dist_wtd_convolve_y_neon(const uint8_t *src, int src_stride,
1343 uint8_t *dst8, int dst8_stride, int w, int h,
1344 const InterpFilterParams *filter_params_x,
1345 const InterpFilterParams *filter_params_y,
1346 const int subpel_x_qn, const int subpel_y_qn,
1347 ConvolveParams *conv_params) {
1348 assert(!(w % 4));
1349 assert(!(h % 4));
1350
1351 CONV_BUF_TYPE *dst = conv_params->dst;
1352 const int dst_stride = conv_params->dst_stride;
1353 const int vert_offset = filter_params_y->taps / 2 - 1;
1354 const int bits = FILTER_BITS - conv_params->round_0;
1355 const int bd = 8;
1356 const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
1357 const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
1358 (1 << (offset_bits - conv_params->round_1 - 1));
1359 const int round_bits =
1360 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
1361 const uint16_t fwd_offset = conv_params->fwd_offset;
1362 const uint16_t bck_offset = conv_params->bck_offset;
1363 const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
1364 const int shift_value = (conv_params->round_1 - 1 - bits);
1365
1366 (void)filter_params_x;
1367 (void)subpel_x_qn;
1368
1369 // vertical filter
1370 const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
1371 filter_params_y, subpel_y_qn & SUBPEL_MASK);
1372
1373 const uint8_t *src_ptr = src - (vert_offset * src_stride);
1374
1375 int16_t y_filter_tmp[8];
1376 int16x8_t filter_y_coef = vld1q_s16(y_filter);
1377
1378   // The filter coefficients are all even, so halve them (shift right by one)
1379   // to reduce the intermediate precision requirements.
1380 filter_y_coef = vshrq_n_s16(filter_y_coef, 1);
1381 vst1q_s16(&y_filter_tmp[0], filter_y_coef);
1382
1383 const uint8_t *s;
1384 uint8_t *d_u8;
1385 uint8_t *dst_u8_ptr;
1386 CONV_BUF_TYPE *d, *dst_ptr;
1387 int width, height;
1388
1389 s = src_ptr;
1390 dst_ptr = dst;
1391 dst_u8_ptr = dst8;
1392 width = w;
1393 height = h;
1394
1395   // The multiplication of the vertical filter output by (1 << bits) is folded
1396   // into its rounding shift, so the combined shift must remain positive.
1397 assert((conv_params->round_1 - 2) >= bits);
1398
1399 if ((w == 4) || (h == 4)) {
1400 int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
1401 uint16x4_t res4;
1402 uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0), tu2 = vdup_n_u32(0),
1403 tu3 = vdup_n_u32(0);
1404 int16x8_t u0, u1, u2, u3;
1405 uint8x8_t t0;
1406
1407 #if defined(__aarch64__)
1408 int16x4_t s8, s9, s10, d1, d2, d3;
1409 uint16x4_t res5, res6, res7;
1410 uint8x8_t t1;
1411 #endif
1412 const int16x4_t round_offset64 = vdup_n_s16(round_offset);
1413 const int16x4_t shift_vec = vdup_n_s16(-shift_value);
1414 const int16x4_t zero = vdup_n_s16(0);
1415
1416 do {
1417 s = src_ptr;
1418 d = dst_ptr;
1419 d_u8 = dst_u8_ptr;
1420 height = h;
1421 __builtin_prefetch(s + 0 * src_stride);
1422 __builtin_prefetch(s + 1 * src_stride);
1423 __builtin_prefetch(s + 2 * src_stride);
1424 __builtin_prefetch(s + 3 * src_stride);
1425
1426 load_unaligned_u8_4x8(s, src_stride, &tu0, &tu1, &tu2, &tu3);
1427
1428 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
1429 u1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
1430 u2 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu2)));
1431 u3 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu3)));
1432
1433 s0 = vget_low_s16(u0);
1434 s1 = vget_high_s16(u0);
1435 s2 = vget_low_s16(u1);
1436 s3 = vget_high_s16(u1);
1437 s4 = vget_low_s16(u2);
1438 s5 = vget_high_s16(u2);
1439 s6 = vget_low_s16(u3);
1440
1441 __builtin_prefetch(d + 0 * dst_stride);
1442 __builtin_prefetch(d + 1 * dst_stride);
1443 __builtin_prefetch(d + 2 * dst_stride);
1444 __builtin_prefetch(d + 3 * dst_stride);
1445
1446 s += (7 * src_stride);
1447 do {
1448 #if defined(__aarch64__)
1449 load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1);
1450
1451 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
1452 u1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
1453
1454 s7 = vget_low_s16(u0);
1455 s8 = vget_high_s16(u0);
1456 s9 = vget_low_s16(u1);
1457 s10 = vget_high_s16(u1);
1458
1459 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
1460 zero, shift_vec);
1461 d0 = vadd_s16(d0, round_offset64);
1462 d1 = convolve8_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter_tmp,
1463 zero, shift_vec);
1464 d1 = vadd_s16(d1, round_offset64);
1465 d2 = convolve8_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter_tmp,
1466 zero, shift_vec);
1467 d2 = vadd_s16(d2, round_offset64);
1468 d3 = convolve8_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter_tmp,
1469 zero, shift_vec);
1470 d3 = vadd_s16(d3, round_offset64);
1471
1472 if (conv_params->do_average) {
1473 __builtin_prefetch(d + 0 * dst_stride);
1474 __builtin_prefetch(d + 1 * dst_stride);
1475 __builtin_prefetch(d + 2 * dst_stride);
1476 __builtin_prefetch(d + 3 * dst_stride);
1477
1478 __builtin_prefetch(d_u8 + 0 * dst8_stride);
1479 __builtin_prefetch(d_u8 + 1 * dst8_stride);
1480 __builtin_prefetch(d_u8 + 2 * dst8_stride);
1481 __builtin_prefetch(d_u8 + 3 * dst8_stride);
1482
1483 load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
1484 d += (dst_stride << 2);
1485
1486 compute_avg_4x4(res4, res5, res6, res7, vreinterpret_u16_s16(d0),
1487 vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
1488 vreinterpret_u16_s16(d3), fwd_offset, bck_offset,
1489 round_offset64, round_bits, use_dist_wtd_comp_avg,
1490 &t0, &t1);
1491
1492 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
1493 d_u8 += dst8_stride;
1494 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 1);
1495 d_u8 += dst8_stride;
1496 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 0);
1497 d_u8 += dst8_stride;
1498 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t1), 1);
1499 d_u8 += dst8_stride;
1500 } else {
1501 store_u16_4x4(d, dst_stride, vreinterpret_u16_s16(d0),
1502 vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
1503 vreinterpret_u16_s16(d3));
1504 d += (dst_stride << 2);
1505 }
1506
1507 s0 = s4;
1508 s1 = s5;
1509 s2 = s6;
1510 s3 = s7;
1511 s4 = s8;
1512 s5 = s9;
1513 s6 = s10;
1514
1515 s += (src_stride << 2);
1516 height -= 4;
1517 #else
1518 load_unaligned_u8_4x1(s, src_stride, &tu0);
1519 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
1520 s7 = vget_low_s16(u0);
1521
1522 d0 = convolve8_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
1523 zero, shift_vec);
1524
1525 d0 = vadd_s16(d0, round_offset64);
1526
1527 if (conv_params->do_average) {
1528 __builtin_prefetch(d);
1529
1530 res4 = vld1_u16(d);
1531 d += (dst_stride);
1532
1533 compute_avg_4x1(res4, vreinterpret_u16_s16(d0), fwd_offset,
1534 bck_offset, round_offset64, round_bits,
1535 use_dist_wtd_comp_avg, &t0);
1536
1537 vst1_lane_u32((uint32_t *)d_u8, vreinterpret_u32_u8(t0), 0);
1538 d_u8 += dst8_stride;
1539 } else {
1540 vst1_u16(d, vreinterpret_u16_s16(d0));
1541 d += (dst_stride);
1542 }
1543
1544 s0 = s1;
1545 s1 = s2;
1546 s2 = s3;
1547 s3 = s4;
1548 s4 = s5;
1549 s5 = s6;
1550 s6 = s7;
1551
1552 s += (src_stride);
1553 height--;
1554 #endif
1555 } while (height > 0);
1556 src_ptr += 4;
1557 dst_ptr += 4;
1558 dst_u8_ptr += 4;
1559 width -= 4;
1560 } while (width > 0);
1561 } else {
1562 CONV_BUF_TYPE *d_tmp;
1563 int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
1564 int16x8_t res0;
1565 uint16x8_t res8;
1566 uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7;
1567 const int16x8_t round_offset128 = vdupq_n_s16(round_offset);
1568 const int16x8_t shift_vec = vdupq_n_s16(-shift_value);
1569 const int16x4_t round_offset64 = vdup_n_s16(round_offset);
1570 const int16x8_t zero = vdupq_n_s16(0);
1571 #if defined(__aarch64__)
1572 int16x8_t s8, s9, s10, s11, s12, s13, s14;
1573 int16x8_t res1, res2, res3, res4, res5, res6, res7;
1574 uint16x8_t res10, res11, res9;
1575 #endif
1576 dst_ptr = dst;
1577 dst_u8_ptr = dst8;
1578 do {
1579 __builtin_prefetch(src_ptr + 0 * src_stride);
1580 __builtin_prefetch(src_ptr + 1 * src_stride);
1581 __builtin_prefetch(src_ptr + 2 * src_stride);
1582 __builtin_prefetch(src_ptr + 3 * src_stride);
1583 __builtin_prefetch(src_ptr + 4 * src_stride);
1584 __builtin_prefetch(src_ptr + 5 * src_stride);
1585 __builtin_prefetch(src_ptr + 6 * src_stride);
1586 __builtin_prefetch(src_ptr + 7 * src_stride);
1587 load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1588
1589 s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
1590 s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
1591 s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
1592 s3 = vreinterpretq_s16_u16(vmovl_u8(t3));
1593 s4 = vreinterpretq_s16_u16(vmovl_u8(t4));
1594 s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
1595 s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
1596
1597 height = h;
1598 s = src_ptr + (7 * src_stride);
1599 d_tmp = dst_ptr;
1600 d_u8 = dst_u8_ptr;
1601
1602 do {
1603 #if defined(__aarch64__)
1604 load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
1605
1606 s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
1607 s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
1608 s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
1609 s10 = vreinterpretq_s16_u16(vmovl_u8(t3));
1610 s11 = vreinterpretq_s16_u16(vmovl_u8(t4));
1611 s12 = vreinterpretq_s16_u16(vmovl_u8(t5));
1612 s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
1613 s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
1614
1615 __builtin_prefetch(dst_ptr + 0 * dst_stride);
1616 __builtin_prefetch(dst_ptr + 1 * dst_stride);
1617 __builtin_prefetch(dst_ptr + 2 * dst_stride);
1618 __builtin_prefetch(dst_ptr + 3 * dst_stride);
1619
1620 res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
1621 zero, shift_vec);
1622 res0 = vaddq_s16(res0, round_offset128);
1623 res1 = convolve8_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter_tmp,
1624 zero, shift_vec);
1625 res1 = vaddq_s16(res1, round_offset128);
1626 res2 = convolve8_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter_tmp,
1627 zero, shift_vec);
1628 res2 = vaddq_s16(res2, round_offset128);
1629 res3 = convolve8_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter_tmp,
1630 zero, shift_vec);
1631 res3 = vaddq_s16(res3, round_offset128);
1632 res4 = convolve8_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, y_filter_tmp,
1633 zero, shift_vec);
1634 res4 = vaddq_s16(res4, round_offset128);
1635 res5 = convolve8_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12,
1636 y_filter_tmp, zero, shift_vec);
1637 res5 = vaddq_s16(res5, round_offset128);
1638 res6 = convolve8_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13,
1639 y_filter_tmp, zero, shift_vec);
1640 res6 = vaddq_s16(res6, round_offset128);
1641 res7 = convolve8_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
1642 y_filter_tmp, zero, shift_vec);
1643 res7 = vaddq_s16(res7, round_offset128);
1644
1645 if (conv_params->do_average) {
1646 __builtin_prefetch(d_tmp + 0 * dst8_stride);
1647 __builtin_prefetch(d_tmp + 1 * dst8_stride);
1648 __builtin_prefetch(d_tmp + 2 * dst8_stride);
1649 __builtin_prefetch(d_tmp + 3 * dst8_stride);
1650
1651 load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
1652 d_tmp += (dst_stride << 2);
1653
1654 compute_avg_8x4(res8, res9, res10, res11, vreinterpretq_u16_s16(res0),
1655 vreinterpretq_u16_s16(res1),
1656 vreinterpretq_u16_s16(res2),
1657 vreinterpretq_u16_s16(res3), fwd_offset, bck_offset,
1658 round_offset64, round_bits, use_dist_wtd_comp_avg,
1659 &t0, &t1, &t2, &t3);
1660
1661 store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
1662 d_u8 += (dst8_stride << 2);
1663
1664 load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
1665 d_tmp += (dst_stride << 2);
1666
1667 compute_avg_8x4(res8, res9, res10, res11, vreinterpretq_u16_s16(res4),
1668 vreinterpretq_u16_s16(res5),
1669 vreinterpretq_u16_s16(res6),
1670 vreinterpretq_u16_s16(res7), fwd_offset, bck_offset,
1671 round_offset64, round_bits, use_dist_wtd_comp_avg,
1672 &t0, &t1, &t2, &t3);
1673
1674 store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
1675 d_u8 += (dst8_stride << 2);
1676 } else {
1677 store_u16_8x8(
1678 d_tmp, dst_stride, vreinterpretq_u16_s16(res0),
1679 vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
1680 vreinterpretq_u16_s16(res3), vreinterpretq_u16_s16(res4),
1681 vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
1682 vreinterpretq_u16_s16(res7));
1683 d_tmp += (dst_stride << 3);
1684 }
1685
1686 s0 = s8;
1687 s1 = s9;
1688 s2 = s10;
1689 s3 = s11;
1690 s4 = s12;
1691 s5 = s13;
1692 s6 = s14;
1693 s += (8 * src_stride);
1694 height -= 8;
1695 #else
1696 s7 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s)));
1697
1698 __builtin_prefetch(dst_ptr);
1699
1700 res0 = convolve8_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter_tmp,
1701 zero, shift_vec);
1702 res0 = vaddq_s16(res0, round_offset128);
1703
1704 s0 = s1;
1705 s1 = s2;
1706 s2 = s3;
1707 s3 = s4;
1708 s4 = s5;
1709 s5 = s6;
1710 s6 = s7;
1711
1712 if (conv_params->do_average) {
1713 __builtin_prefetch(d_tmp);
1714
1715 res8 = vld1q_u16(d_tmp);
1716 d_tmp += (dst_stride);
1717
1718 compute_avg_8x1(res8, vreinterpretq_u16_s16(res0), fwd_offset,
1719 bck_offset, round_offset64, round_bits,
1720 use_dist_wtd_comp_avg, &t0);
1721
1722 vst1_u8(d_u8, t0);
1723 d_u8 += (dst8_stride);
1724 } else {
1725 vst1q_u16(d_tmp, vreinterpretq_u16_s16(res0));
1726 d_tmp += dst_stride;
1727 }
1728
1729 s += (src_stride);
1730 height--;
1731 #endif
1732 } while (height > 0);
1733 src_ptr += 8;
1734 dst_ptr += 8;
1735 dst_u8_ptr += 8;
1736 width -= 8;
1737 } while (width > 0);
1738 }
1739 }
1740