/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <arm_neon.h>
#include <assert.h>
#include <stdlib.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

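// Load a vector of luma values from src + offset, subtract the broadcast
// average and store the signed result at dst + offset.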
static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
                                 int16x8_t sub) {
  vst1q_s16(dst + offset,
            vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
}

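// Load two vectors spaced offset elements apart and return their element-wise
// sum.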
static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
  return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
}

// Load half of a vector and duplicate it into the other half.
static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
  return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}

// Store half of a vector.
static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u16(val)[0];
}

// Store half of a vector.
static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u8(val)[0];
}

static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
      vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
      vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      const uint8x8x4_t bot = vld4_u8(input + input_stride);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
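
// Every width branch above stores, for each 2x2 luma block, the sum of its
// four pixels scaled by 2, which equals the average of the four pixels in Q3
// precision. An illustrative scalar equivalent (not part of the build):
//
//   for (int j = 0; j < height >> 1; j++) {
//     for (int i = 0; i < width >> 1; i++) {
//       const int sum = input[2 * i] + input[2 * i + 1] +
//                       input[2 * i + input_stride] +
//                       input[2 * i + 1 + input_stride];
//       pred_buf_q3[i] = sum << 1;  // (sum / 4) << 3, without the division.
//     }
//     input += input_stride << 1;  // two luma rows per output row
//     pred_buf_q3 += CFL_BUF_LINE;
//   }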

static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      uint16x8x2_t sum;
      // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4 interleaves)
      sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
      sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
      vst1_u16(pred_buf_q3, vget_low_u16(top));
    } else if (width == 8) {
      const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
      vst1q_u16(pred_buf_q3, top);
    } else {
      const uint8x16_t top = vld1q_u8(input);
      vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
      vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
      if (width == 32) {
        const uint8x16_t next_top = vld1q_u8(input + 16);
        vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
        vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
      }
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

#ifndef __aarch64__
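// vpaddq_u16 is only provided as an intrinsic on AArch64; emulate it for
// 32-bit Arm builds by pairwise-adding each half of both inputs and
// recombining the results.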
uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
  return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
                      vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif

static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t bot = vld1_u16(input + input_stride);
      const uint16x4_t sum = vadd_u16(top, bot);
      const uint16x4_t hsum = vpadd_u16(sum, sum);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
    } else if (width < 32) {
      const uint16x8_t top = vld1q_u16(input);
      const uint16x8_t bot = vld1q_u16(input + input_stride);
      const uint16x8_t sum = vaddq_u16(top, bot);
      if (width == 8) {
        const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
        vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
      } else {
        const uint16x8_t top_1 = vld1q_u16(input + 8);
        const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
        const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
        const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
        vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
      }
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t hsum = vpadd_u16(top, top);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 8) {
      const uint16x4x2_t top = vld2_u16(input);
      // equivalent to a vpadd_u16 (because vld2 interleaves)
      const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
      vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 16) {
      const uint16x8x2_t top = vld2q_u16(input);
      // equivalent to a vpaddq_u16 (because vld2q interleaves)
      const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
      uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
                                vshlq_n_u16(hsum_1, 2) } };
      vst2q_u16(pred_buf_q3, result);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
    } else if (width == 8) {
      const uint16x8_t top = vld1q_u16(input);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
    } else if (width == 16) {
      uint16x8x2_t top = vld2q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      vst2q_u16(pred_buf_q3, top);
    } else {
      uint16x8x4_t top = vld4q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      top.val[2] = vshlq_n_u16(top.val[2], 3);
      top.val[3] = vshlq_n_u16(top.val[3], 3);
      vst4q_u16(pred_buf_q3, top);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

CFL_GET_SUBSAMPLE_FUNCTION(neon)

static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
                                         int width, int height,
                                         int round_offset,
                                         const int num_pel_log2) {
  const uint16_t *const end = src + height * CFL_BUF_LINE;

  // Round offset is not needed, because NEON will handle the rounding.
  (void)round_offset;

  // To optimize the use of the CPU pipeline, we process 4 rows per iteration
  const int step = 4 * CFL_BUF_LINE;

  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers and only require 15 bits. By using
  // unsigned integer for the sum, we can do one addition operation inside 16
  // bits (8 lanes) before having to convert to 32 bits (4 lanes).
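  // For example, with 12-bit input the 4:2:0 subsampling stage above stores
  // at most (4 * 4095) << 1 = 32760 per entry, so the sum of two entries
  // (at most 65520) still fits in an unsigned 16-bit lane before the widening
  // pairwise accumulate below.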
  const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = { 0, 0, 0, 0 };
  do {
    // For all widths, we load, add and combine the data so it fits in 4 lanes.
    if (width == 4) {
      const uint16x4_t a0 =
          vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
      const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
                                     vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
      sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
    } else if (width == 8) {
      const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
      const uint16x8_t a1 =
          vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
      sum_32x4 = vpadalq_u16(sum_32x4, a0);
      sum_32x4 = vpadalq_u16(sum_32x4, a1);
    } else {
      const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
      const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
      const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
      const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
      sum_32x4 = vpadalq_u16(sum_32x4, row0);
      sum_32x4 = vpadalq_u16(sum_32x4, row1);
      sum_32x4 = vpadalq_u16(sum_32x4, row2);
      sum_32x4 = vpadalq_u16(sum_32x4, row3);

      if (width == 32) {
        const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
        const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
        const uint16x8_t row2_1 =
            vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
        const uint16x8_t row3_1 =
            vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);

        sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
      }
    }
    sum_buf += step;
  } while (sum_buf < end);

  // Permute and add in such a way that each lane contains the block sum.
  // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#ifdef __aarch64__
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
  uint32x4_t flip =
      vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
  sum_32x4 = vaddq_u32(sum_32x4, flip);
  sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif

  // Computing the average could be done using scalars, but getting off the
  // NEON engine introduces latency, so we use vqrshrn.
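  // Each vqrshrn_n_u32 below computes the rounding shift
  //   avg = (sum + (1 << (num_pel_log2 - 1))) >> num_pel_log2,
  // which is the rounded average that the unused round_offset argument would
  // otherwise provide.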
  int16x4_t avg_16x4;
  // Constant propagation makes for some ugly code.
  switch (num_pel_log2) {
    case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
    case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
    case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
    case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
    case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
    case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
    case 10:
      avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
      break;
    default: assert(0);
  }

  if (width == 4) {
    do {
      vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
      src += CFL_BUF_LINE;
      dst += CFL_BUF_LINE;
    } while (src < end);
  } else {
    const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
    do {
      vldsubstq_s16(dst, src, 0, avg_16x8);
      vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);

      if (width > 8) {
        vldsubstq_s16(dst, src, 8, avg_16x8);
        vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      if (width == 32) {
        vldsubstq_s16(dst, src, 16, avg_16x8);
        vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24, avg_16x8);
        vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      src += step;
      dst += step;
    } while (src < end);
  }
}

CFL_SUB_AVG_FN(neon)

// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//   practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//   corresponding elements in b are zero. Because vsign is used twice in a
//   row, with b in the first call becoming a in the second call, there's no
//   impact from not zeroing out.
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
  const int16x4_t mask = vshr_n_s16(b, 15);
  return veor_s16(vadd_s16(a, mask), mask);
}
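
// For a lane with a = 5 and b = -3: mask = -3 >> 15 = -1 (0xffff), and
// (5 + (-1)) ^ 0xffff = 4 ^ 0xffff = -5. When b is non-negative the mask is 0
// and a passes through unchanged; this is the usual two's-complement
// conditional negate, (a + mask) ^ mask.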

// Saturating negate 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//   practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//   corresponding elements in b are zero. Because vsignq is used twice in a
//   row, with b in the first call becoming a in the second call, there's no
//   impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vshrq_n_s16(b, 15);
  return veorq_s16(vaddq_s16(a, mask), mask);
}

static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
                                   int16x4_t alpha_sign, int abs_alpha_q12,
                                   int16x4_t dc) {
  const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
  const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
  int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
  return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}
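
// In predict_w4 above and the wider variants below, abs_alpha_q12 is
// abs(alpha_q3) << 9, so the saturating rounding doubling multiply-high
// computes
//   (2 * |ac_q3| * (|alpha_q3| << 9) + (1 << 15)) >> 16
//     = (|ac_q3| * |alpha_q3| + 32) >> 6,
// i.e. the Q3 * Q3 product rounded back to pixel (Q0) precision, before the
// sign is restored and the DC value is added.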

static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
                                   int16x8_t alpha_sign, int abs_alpha_q12,
                                   int16x8_t dc) {
  const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
  const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
  int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
  return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  int16x8x2_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  return result;
}

static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
  const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  const int16x8_t scaled_luma_2 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
  const int16x8_t scaled_luma_3 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
  int16x8x4_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
  result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
  return result;
}

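// Note: dst is assumed to already hold the (uniform) DC prediction when the
// predict functions below are called, which is why a single splat of *dst is
// used as the DC term rather than a per-pixel load.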
static INLINE void cfl_predict_lbd_neon(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    do {
      const int16x4_t pred =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    do {
      if (width == 8) {
        vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
                                            abs_alpha_q12, dc)));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
                                       vqmovun_s16(pred.val[1]) } };
        vst2_u8(dst, predun);
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x4_t predun = {
          { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
            vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
        };
        vst4_u8(dst, predun);
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, lbd)

static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
  return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
}

static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
  return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
}

static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
  uint16x8x2_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  return result;
}

static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
  uint16x8x4_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  result.val[2] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
  result.val[3] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
  return result;
}

static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  const int max = (1 << bd) - 1;
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    const int16x4_t max_16x4 = vdup_n_s16(max);
    do {
      const int16x4_t scaled_luma =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    const int16x8_t max_16x8 = vdupq_n_s16(max);
    do {
      if (width == 8) {
        const int16x8_t pred =
            predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst1q_u16(dst, clampq_s16(pred, max_16x8));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, hbd)