/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/sum_neon.h"

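// Rounded average of the 16 pixels of a 4x4 block. vrshr_n_u32(d, 4) is a
// rounding right shift, so the scalar equivalent of the sequence below is
// (sum + 8) >> 4.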
uint32_t vpx_avg_4x4_neon(const uint8_t *a, int a_stride) {
  const uint8x16_t b = load_unaligned_u8q(a, a_stride);
  const uint16x8_t c = vaddl_u8(vget_low_u8(b), vget_high_u8(b));
  const uint32x2_t d = horizontal_add_uint16x8(c);
  return vget_lane_u32(vrshr_n_u32(d, 4), 0);
}

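// Rounded average of the 64 pixels of an 8x8 block; the scalar equivalent of
// the final rounding shift is (sum + 32) >> 6.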
uint32_t vpx_avg_8x8_neon(const uint8_t *a, int a_stride) {
  int i;
  uint8x8_t b, c;
  uint16x8_t sum;
  uint32x2_t d;
  b = vld1_u8(a);
  a += a_stride;
  c = vld1_u8(a);
  a += a_stride;
  sum = vaddl_u8(b, c);

  for (i = 0; i < 6; ++i) {
    const uint8x8_t d = vld1_u8(a);
    a += a_stride;
    sum = vaddw_u8(sum, d);
  }

  d = horizontal_add_uint16x8(sum);

  return vget_lane_u32(vrshr_n_u32(d, 6), 0);
}

// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int vpx_satd_neon(const tran_low_t *coeff, int length) {
  const int16x4_t zero = vdup_n_s16(0);
  int32x4_t accum = vdupq_n_s32(0);

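  // Accumulate |coeff[i]|: vabal against a zero vector computes the absolute
  // difference |coeff[i] - 0| and widens it into the 32-bit accumulator,
  // 16 coefficients per loop iteration.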
  do {
    const int16x8_t src0 = load_tran_low_to_s16q(coeff);
    const int16x8_t src8 = load_tran_low_to_s16q(coeff + 8);
    accum = vabal_s16(accum, vget_low_s16(src0), zero);
    accum = vabal_s16(accum, vget_high_s16(src0), zero);
    accum = vabal_s16(accum, vget_low_s16(src8), zero);
    accum = vabal_s16(accum, vget_high_s16(src8), zero);
    length -= 16;
    coeff += 16;
  } while (length != 0);

  {
    // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
    const int64x2_t s0 = vpaddlq_s32(accum);  // cascading summation of 'accum'.
    const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
                                  vreinterpret_s32_s64(vget_high_s64(s0)));
    const int satd = vget_lane_s32(s1, 0);
    return satd;
  }
}

void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
                          const int ref_stride, const int height) {
  int i;
  uint16x8_t vec_sum_lo = vdupq_n_u16(0);
  uint16x8_t vec_sum_hi = vdupq_n_u16(0);
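  // Assuming heights of 16, 32 or 64, shift_factor evaluates to -3, -4 or -5,
  // so the vshlq_u16 calls below right-shift the accumulated column sums,
  // dividing them by height / 2.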
  const int shift_factor = ((height >> 5) + 3) * -1;
  const int16x8_t vec_shift = vdupq_n_s16(shift_factor);

  for (i = 0; i < height; i += 8) {
    const uint8x16_t vec_row1 = vld1q_u8(ref);
    const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);
    const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);
    const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);
    const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);
    const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);
    const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);
    const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row1));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row1));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row2));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row2));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row3));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row3));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row4));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row4));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row5));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row5));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row6));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row6));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row7));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row7));

    vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row8));
    vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row8));

    ref += ref_stride * 8;
  }

  vec_sum_lo = vshlq_u16(vec_sum_lo, vec_shift);
  vec_sum_hi = vshlq_u16(vec_sum_hi, vec_shift);

  vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_lo));
  hbuf += 8;
  vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
}

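// Sums 'width' bytes of 'ref' into a single 16-bit value. The loop consumes
// 16 pixels per iteration, so 'width' is expected to be a multiple of 16.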
int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) {
  int i;
  uint16x8_t vec_sum = vdupq_n_u16(0);

  for (i = 0; i < width; i += 16) {
    const uint8x16_t vec_row = vld1q_u8(ref);
    vec_sum = vaddw_u8(vec_sum, vget_low_u8(vec_row));
    vec_sum = vaddw_u8(vec_sum, vget_high_u8(vec_row));
    ref += 16;
  }

  return vget_lane_s16(vreinterpret_s16_u32(horizontal_add_uint16x8(vec_sum)),
                       0);
}

// ref, src = [0, 510] - max diff = 16-bits
// bwl = {2, 3, 4}, width = {16, 32, 64}
int vpx_vector_var_neon(int16_t const *ref, int16_t const *src,
                        const int bwl) {
  int width = 4 << bwl;
  int32x4_t sse = vdupq_n_s32(0);
  int16x8_t total = vdupq_n_s16(0);

  assert(width >= 8);
  assert((width % 8) == 0);

  do {
    const int16x8_t r = vld1q_s16(ref);
    const int16x8_t s = vld1q_s16(src);
    const int16x8_t diff = vsubq_s16(r, s);  // [-510, 510], 10 bits.
    const int16x4_t diff_lo = vget_low_s16(diff);
    const int16x4_t diff_hi = vget_high_s16(diff);
    sse = vmlal_s16(sse, diff_lo, diff_lo);  // dynamic range 26 bits.
    sse = vmlal_s16(sse, diff_hi, diff_hi);
    total = vaddq_s16(total, diff);  // dynamic range 16 bits.

    ref += 8;
    src += 8;
    width -= 8;
  } while (width != 0);

  {
    // Note: the pairwise addition of 'total' could be implemented similarly to
    // horizontal_add_uint16x8(), but using one fewer vpaddl on 'total',
    // interleaved with the summation of 'sse', performed better on a
    // Cortex-A15.
    const int32x4_t t0 = vpaddlq_s16(total);  // cascading summation of 'total'
    const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0));
    const int32x2_t t2 = vpadd_s32(t1, t1);
    const int t = vget_lane_s32(t2, 0);
    const int64x2_t s0 = vpaddlq_s32(sse);  // cascading summation of 'sse'.
    const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
                                  vreinterpret_s32_s64(vget_high_s64(s0)));
    const int s = vget_lane_s32(s1, 0);
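    // width = 4 << bwl = 1 << (bwl + 2), so the return value below is
    // s - (t * t) / width: the sum of squared differences minus the squared
    // sum scaled by the count, i.e. width times the variance.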
    const int shift_factor = bwl + 2;
    return s - ((t * t) >> shift_factor);
  }
}

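// Computes the minimum and maximum of the per-pixel absolute differences
// between the 8x8 blocks 'a' and 'b'.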
void vpx_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                         int b_stride, int *min, int *max) {
  // Load and concatenate.
  const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));
  const uint8x16_t a23 =
      vcombine_u8(vld1_u8(a + 2 * a_stride), vld1_u8(a + 3 * a_stride));
  const uint8x16_t a45 =
      vcombine_u8(vld1_u8(a + 4 * a_stride), vld1_u8(a + 5 * a_stride));
  const uint8x16_t a67 =
      vcombine_u8(vld1_u8(a + 6 * a_stride), vld1_u8(a + 7 * a_stride));

  const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride));
  const uint8x16_t b23 =
      vcombine_u8(vld1_u8(b + 2 * b_stride), vld1_u8(b + 3 * b_stride));
  const uint8x16_t b45 =
      vcombine_u8(vld1_u8(b + 4 * b_stride), vld1_u8(b + 5 * b_stride));
  const uint8x16_t b67 =
      vcombine_u8(vld1_u8(b + 6 * b_stride), vld1_u8(b + 7 * b_stride));

  // Absolute difference.
  const uint8x16_t ab01_diff = vabdq_u8(a01, b01);
  const uint8x16_t ab23_diff = vabdq_u8(a23, b23);
  const uint8x16_t ab45_diff = vabdq_u8(a45, b45);
  const uint8x16_t ab67_diff = vabdq_u8(a67, b67);

  // Max and min values between the Q vectors.
  const uint8x16_t ab0123_max = vmaxq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_max = vmaxq_u8(ab45_diff, ab67_diff);
  const uint8x16_t ab0123_min = vminq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_min = vminq_u8(ab45_diff, ab67_diff);

  const uint8x16_t ab07_max = vmaxq_u8(ab0123_max, ab4567_max);
  const uint8x16_t ab07_min = vminq_u8(ab0123_min, ab4567_min);

  // Split to D and start doing pairwise.
  uint8x8_t ab_max = vmax_u8(vget_high_u8(ab07_max), vget_low_u8(ab07_max));
  uint8x8_t ab_min = vmin_u8(vget_high_u8(ab07_min), vget_low_u8(ab07_min));

  // Enough runs of vpmax/min propagate the max/min values to every position.
  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  *min = *max = 0;  // Clear high bits
  // Store directly to avoid costly neon->gpr transfer.
  vst1_lane_u8((uint8_t *)max, ab_max, 0);
  vst1_lane_u8((uint8_t *)min, ab_min, 0);
}