/* -*- c++ -*- */
/*
 * Copyright 2016 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

/*!
 * \page volk_16ic_x2_dot_prod_16ic
 *
 * \b Overview
 *
 * Computes the dot product of two complex vectors (16-bit integer per
 * component): the vectors are multiplied element by element and the products
 * are accumulated into a single value. The accumulation uses saturation
 * arithmetic, so the result never wraps beyond the limits of the data type.
 *
 * <b>Dispatcher Prototype</b>
 * \code
 * void volk_16ic_x2_dot_prod_16ic(lv_16sc_t* result, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points);
 * \endcode
 *
 * \b Inputs
 * \li in_a:          One of the vectors to be multiplied and accumulated.
 * \li in_b:          The other vector to be multiplied and accumulated.
 * \li num_points:    Number of complex values to be multiplied together, accumulated and stored into \p result.
 *
 * \b Outputs
 * \li result:        Value of the accumulated result.
 *
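 * \b Example
 * A minimal usage sketch (illustrative only; it assumes volk_malloc(),
 * volk_get_alignment() and volk_free() from volk.h, and fills the inputs
 * with an arbitrary test pattern):
 * \code
 *   unsigned int num_points = 1024;
 *   unsigned int n;
 *   lv_16sc_t* in_a = (lv_16sc_t*)volk_malloc(sizeof(lv_16sc_t) * num_points, volk_get_alignment());
 *   lv_16sc_t* in_b = (lv_16sc_t*)volk_malloc(sizeof(lv_16sc_t) * num_points, volk_get_alignment());
 *   lv_16sc_t result;
 *   for (n = 0; n < num_points; n++) {
 *       in_a[n] = lv_cmake((int16_t)1, (int16_t)2);
 *       in_b[n] = lv_cmake((int16_t)3, (int16_t)4);
 *   }
 *   volk_16ic_x2_dot_prod_16ic(&result, in_a, in_b, num_points);
 *   volk_free(in_a);
 *   volk_free(in_b);
 * \endcode
 *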
 */

#ifndef INCLUDED_volk_16ic_x2_dot_prod_16ic_H
#define INCLUDED_volk_16ic_x2_dot_prod_16ic_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <volk/saturation_arithmetic.h>


#ifdef LV_HAVE_GENERIC

static inline void volk_16ic_x2_dot_prod_16ic_generic(lv_16sc_t* result, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    result[0] = lv_cmake((int16_t)0, (int16_t)0);
    unsigned int n;
    for (n = 0; n < num_points; n++)
        {
            lv_16sc_t tmp = in_a[n] * in_b[n]; // complex multiply (may wrap)
            result[0] = lv_cmake(sat_adds16i(lv_creal(result[0]), lv_creal(tmp)), sat_adds16i(lv_cimag(result[0]), lv_cimag(tmp))); // saturated accumulation
        }
}

#endif /*LV_HAVE_GENERIC*/


#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_a_sse2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int sse_iters = num_points / 4;
    unsigned int number;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;

    if (sse_iters > 0)
        {
            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

            realcacc = _mm_setzero_si128();
            imagcacc = _mm_setzero_si128();

            mask_imag = _mm_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

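            /* The loop below computes re = a.r*b.r - a.i*b.i in the even
             * 16-bit lanes and im = a.i*b.r + a.r*b.i in the odd lanes;
             * mask_real/mask_imag later select those lanes before the
             * results are combined. The 16x16->16 products (_mm_mullo_epi16)
             * may wrap, while the additions and subtractions saturate. */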
            for(number = 0; number < sse_iters; number++)
                {
                    // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
                    a = _mm_load_si128((__m128i*)_in_a); // load four complex values (2-byte real, 2-byte imag each) into the 128-bit register
                    __VOLK_PREFETCH(_in_a + 8);
                    b = _mm_load_si128((__m128i*)_in_b);
                    __VOLK_PREFETCH(_in_b + 8);
                    c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....

                    c_sr = _mm_srli_si128(c, 2); // shift right by one 16-bit lane so each a.i*b.i lines up with its a.r*b.r
                    real = _mm_subs_epi16(c, c_sr); // a.r*b.r - a.i*b.i in the even lanes

                    b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
                    a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....

                    imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
                    imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....

                    imag = _mm_adds_epi16(imag1, imag2); // with saturation arithmetic!

                    realcacc = _mm_adds_epi16(realcacc, real);
                    imagcacc = _mm_adds_epi16(imagcacc, imag);

                    _in_a += 4;
                    _in_b += 4;
                }

            realcacc = _mm_and_si128(realcacc, mask_real);
            imagcacc = _mm_and_si128(imagcacc, mask_imag);

            a = _mm_or_si128(realcacc, imagcacc);

            _mm_store_si128((__m128i*)dotProductVector, a); // Store the results back into the dot product vector

            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])), sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    for (number = 0; number < (num_points % 4); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)), sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *_out = dotProduct;
}

#endif /* LV_HAVE_SSE2 */


#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_u_sse2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int sse_iters = num_points / 4;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;
    unsigned int number;

    if (sse_iters > 0)
        {
            __m128i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t dotProductVector[4];

            realcacc = _mm_setzero_si128();
            imagcacc = _mm_setzero_si128();

            mask_imag = _mm_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

            for(number = 0; number < sse_iters; number++)
                {
                    // a[127:0]=[a3.i,a3.r,a2.i,a2.r,a1.i,a1.r,a0.i,a0.r]
                    a = _mm_loadu_si128((__m128i*)_in_a); // load four complex values (2-byte real, 2-byte imag each) into the 128-bit register
                    __VOLK_PREFETCH(_in_a + 8);
                    b = _mm_loadu_si128((__m128i*)_in_b);
                    __VOLK_PREFETCH(_in_b + 8);
                    c = _mm_mullo_epi16(a, b); // a3.i*b3.i, a3.r*b3.r, ....

                    c_sr = _mm_srli_si128(c, 2); // shift right by one 16-bit lane so each a.i*b.i lines up with its a.r*b.r
                    real = _mm_subs_epi16(c, c_sr); // a.r*b.r - a.i*b.i in the even lanes

                    b_sl = _mm_slli_si128(b, 2); // b3.r, b2.i ....
                    a_sl = _mm_slli_si128(a, 2); // a3.r, a2.i ....

                    imag1 = _mm_mullo_epi16(a, b_sl); // a3.i*b3.r, ....
                    imag2 = _mm_mullo_epi16(b, a_sl); // b3.i*a3.r, ....

                    imag = _mm_adds_epi16(imag1, imag2); // with saturation arithmetic!

                    realcacc = _mm_adds_epi16(realcacc, real);
                    imagcacc = _mm_adds_epi16(imagcacc, imag);

                    _in_a += 4;
                    _in_b += 4;
                }

            realcacc = _mm_and_si128(realcacc, mask_real);
            imagcacc = _mm_and_si128(imagcacc, mask_imag);

            result = _mm_or_si128(realcacc, imagcacc);

            _mm_storeu_si128((__m128i*)dotProductVector, result); // Store the results back into the dot product vector

            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])), sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    for (number = 0; number < (num_points % 4); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)), sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *_out = dotProduct;
}
#endif /* LV_HAVE_SSE2 */


#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_u_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int avx_iters = num_points / 8;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;
    unsigned int number;

    if (avx_iters > 0)
        {
            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

            realcacc = _mm256_setzero_si256();
            imagcacc = _mm256_setzero_si256();

            mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

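            /* Same even/odd-lane scheme as the SSE2 kernels, applied to
             * eight complex samples per iteration. Note that
             * _mm256_srli_si256 and _mm256_slli_si256 shift within each
             * 128-bit lane, which matches the interleaved (real, imag)
             * layout. */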
            for(number = 0; number < avx_iters; number++)
                {
                    a = _mm256_loadu_si256((__m256i*)_in_a);
                    __VOLK_PREFETCH(_in_a + 16);
                    b = _mm256_loadu_si256((__m256i*)_in_b);
                    __VOLK_PREFETCH(_in_b + 16);
                    c = _mm256_mullo_epi16(a, b);

                    c_sr = _mm256_srli_si256(c, 2); // shift each 128-bit lane right by one 16-bit element so a.i*b.i lines up with a.r*b.r
                    real = _mm256_subs_epi16(c, c_sr);

                    b_sl = _mm256_slli_si256(b, 2);
                    a_sl = _mm256_slli_si256(a, 2);

                    imag1 = _mm256_mullo_epi16(a, b_sl);
                    imag2 = _mm256_mullo_epi16(b, a_sl);

                    imag = _mm256_adds_epi16(imag1, imag2); // with saturation arithmetic!

                    realcacc = _mm256_adds_epi16(realcacc, real);
                    imagcacc = _mm256_adds_epi16(imagcacc, imag);

                    _in_a += 8;
                    _in_b += 8;
                }

            realcacc = _mm256_and_si256(realcacc, mask_real);
            imagcacc = _mm256_and_si256(imagcacc, mask_imag);

            result = _mm256_or_si256(realcacc, imagcacc);

            _mm256_storeu_si256((__m256i*)dotProductVector, result); // Store the results back into the dot product vector
            _mm256_zeroupper();

            for (number = 0; number < 8; ++number)
                {
                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])), sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    for (number = 0; number < (num_points % 8); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)), sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *_out = dotProduct;
}
#endif /* LV_HAVE_AVX2 */


#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_16ic_x2_dot_prod_16ic_a_avx2(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

    const unsigned int avx_iters = num_points / 8;

    const lv_16sc_t* _in_a = in_a;
    const lv_16sc_t* _in_b = in_b;
    lv_16sc_t* _out = out;
    unsigned int number;

    if (avx_iters > 0)
        {
            __m256i a, b, c, c_sr, mask_imag, mask_real, real, imag, imag1, imag2, b_sl, a_sl, realcacc, imagcacc, result;
            __VOLK_ATTR_ALIGNED(32) lv_16sc_t dotProductVector[8];

            realcacc = _mm256_setzero_si256();
            imagcacc = _mm256_setzero_si256();

            mask_imag = _mm256_set_epi8(0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0);
            mask_real = _mm256_set_epi8(0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF, 0xFF);

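            /* Aligned-load variant; the lane arithmetic is identical to the
             * unaligned AVX2 kernel above. */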
            for(number = 0; number < avx_iters; number++)
                {
                    a = _mm256_load_si256((__m256i*)_in_a);
                    __VOLK_PREFETCH(_in_a + 16);
                    b = _mm256_load_si256((__m256i*)_in_b);
                    __VOLK_PREFETCH(_in_b + 16);
                    c = _mm256_mullo_epi16(a, b);

                    c_sr = _mm256_srli_si256(c, 2); // shift each 128-bit lane right by one 16-bit element so a.i*b.i lines up with a.r*b.r
                    real = _mm256_subs_epi16(c, c_sr);

                    b_sl = _mm256_slli_si256(b, 2);
                    a_sl = _mm256_slli_si256(a, 2);

                    imag1 = _mm256_mullo_epi16(a, b_sl);
                    imag2 = _mm256_mullo_epi16(b, a_sl);

                    imag = _mm256_adds_epi16(imag1, imag2); // with saturation arithmetic!

                    realcacc = _mm256_adds_epi16(realcacc, real);
                    imagcacc = _mm256_adds_epi16(imagcacc, imag);

                    _in_a += 8;
                    _in_b += 8;
                }

            realcacc = _mm256_and_si256(realcacc, mask_real);
            imagcacc = _mm256_and_si256(imagcacc, mask_imag);

            result = _mm256_or_si256(realcacc, imagcacc);

            _mm256_store_si256((__m256i*)dotProductVector, result); // Store the results back into the dot product vector
            _mm256_zeroupper();

            for (number = 0; number < 8; ++number)
                {
                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(dotProductVector[number])), sat_adds16i(lv_cimag(dotProduct), lv_cimag(dotProductVector[number])));
                }
        }

    for (number = 0; number < (num_points % 8); ++number)
        {
            lv_16sc_t tmp = (*_in_a++) * (*_in_b++);
            dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(tmp)), sat_adds16i(lv_cimag(dotProduct), lv_cimag(tmp)));
        }

    *_out = dotProduct;
}
#endif /* LV_HAVE_AVX2 */


#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_16sc_t* a_ptr = (lv_16sc_t*) in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*) in_b;
    *out = lv_cmake((int16_t)0, (int16_t)0);

    if (quarter_points > 0)
        {
            // for 2-lane vectors, 1st lane holds the real part,
            // 2nd lane holds the imaginary part
            int16x4x2_t a_val, b_val, c_val, accumulator;
            int16x4x2_t tmp_real, tmp_imag;
            __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];
            accumulator.val[0] = vdup_n_s16(0);
            accumulator.val[1] = vdup_n_s16(0);
            lv_16sc_t dotProduct = lv_cmake((int16_t)0, (int16_t)0);

            for(number = 0; number < quarter_points; ++number)
                {
                    a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
                    b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
                    __VOLK_PREFETCH(a_ptr + 8);
                    __VOLK_PREFETCH(b_ptr + 8);

                    // multiply the real*real and imag*imag to get real result
                    // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
                    tmp_real.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
                    // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
                    tmp_real.val[1] = vmul_s16(a_val.val[1], b_val.val[1]);

                    // multiply the cross terms to get the imaginary result
                    // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
                    tmp_imag.val[0] = vmul_s16(a_val.val[0], b_val.val[1]);
                    // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
                    tmp_imag.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

                    c_val.val[0] = vqsub_s16(tmp_real.val[0], tmp_real.val[1]);
                    c_val.val[1] = vqadd_s16(tmp_imag.val[0], tmp_imag.val[1]);

                    accumulator.val[0] = vqadd_s16(accumulator.val[0], c_val.val[0]);
                    accumulator.val[1] = vqadd_s16(accumulator.val[1], c_val.val[1]);

                    a_ptr += 4;
                    b_ptr += 4;
                }

            vst2_s16((int16_t*)accum_result, accumulator);
            for (number = 0; number < 4; ++number)
                {
                    dotProduct = lv_cmake(sat_adds16i(lv_creal(dotProduct), lv_creal(accum_result[number])), sat_adds16i(lv_cimag(dotProduct), lv_cimag(accum_result[number])));
                }

            *out = dotProduct;
        }

    // tail case (plain, non-saturated complex multiply-accumulate)
    for(number = quarter_points * 4; number < num_points; ++number)
        {
            *out += (*a_ptr++) * (*b_ptr++);
        }
}

#endif /* LV_HAVE_NEON */


#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon_vma(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_16sc_t* a_ptr = (lv_16sc_t*) in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*) in_b;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    int16x4x2_t a_val, b_val, accumulator;
    int16x4x2_t tmp;
    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];
    accumulator.val[0] = vdup_n_s16(0);
    accumulator.val[1] = vdup_n_s16(0);

    for(number = 0; number < quarter_points; ++number)
        {
            a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
            b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
            __VOLK_PREFETCH(a_ptr + 8);
            __VOLK_PREFETCH(b_ptr + 8);

            tmp.val[0] = vmul_s16(a_val.val[0], b_val.val[0]);
            tmp.val[1] = vmul_s16(a_val.val[1], b_val.val[0]);

            // use multiply accumulate/subtract to get result
            tmp.val[0] = vmls_s16(tmp.val[0], a_val.val[1], b_val.val[1]);
            tmp.val[1] = vmla_s16(tmp.val[1], a_val.val[0], b_val.val[1]);

            accumulator.val[0] = vqadd_s16(accumulator.val[0], tmp.val[0]);
            accumulator.val[1] = vqadd_s16(accumulator.val[1], tmp.val[1]);

            a_ptr += 4;
            b_ptr += 4;
        }

    vst2_s16((int16_t*)accum_result, accumulator);
    *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = quarter_points * 4; number < num_points; ++number)
        {
            *out += (*a_ptr++) * (*b_ptr++);
        }
}

#endif /* LV_HAVE_NEON */


#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16ic_x2_dot_prod_16ic_neon_optvma(lv_16sc_t* out, const lv_16sc_t* in_a, const lv_16sc_t* in_b, unsigned int num_points)
{
    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_16sc_t* a_ptr = (lv_16sc_t*) in_a;
    lv_16sc_t* b_ptr = (lv_16sc_t*) in_b;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    int16x4x2_t a_val, b_val, accumulator1, accumulator2;

    __VOLK_ATTR_ALIGNED(16) lv_16sc_t accum_result[4];
    accumulator1.val[0] = vdup_n_s16(0);
    accumulator1.val[1] = vdup_n_s16(0);
    accumulator2.val[0] = vdup_n_s16(0);
    accumulator2.val[1] = vdup_n_s16(0);

    for(number = 0; number < quarter_points; ++number)
        {
            a_val = vld2_s16((int16_t*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
            b_val = vld2_s16((int16_t*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
            __VOLK_PREFETCH(a_ptr + 8);
            __VOLK_PREFETCH(b_ptr + 8);

            // use 2 accumulators to remove inter-instruction data dependencies
            accumulator1.val[0] = vmla_s16(accumulator1.val[0], a_val.val[0], b_val.val[0]);
            accumulator2.val[0] = vmls_s16(accumulator2.val[0], a_val.val[1], b_val.val[1]);
            accumulator1.val[1] = vmla_s16(accumulator1.val[1], a_val.val[0], b_val.val[1]);
            accumulator2.val[1] = vmla_s16(accumulator2.val[1], a_val.val[1], b_val.val[0]);

            a_ptr += 4;
            b_ptr += 4;
        }

    accumulator1.val[0] = vqadd_s16(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vqadd_s16(accumulator1.val[1], accumulator2.val[1]);

    vst2_s16((int16_t*)accum_result, accumulator1);
    *out = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for(number = quarter_points * 4; number < num_points; ++number)
        {
            *out += (*a_ptr++) * (*b_ptr++);
        }
}

#endif /* LV_HAVE_NEON */

#endif /*INCLUDED_volk_16ic_x2_dot_prod_16ic_H*/