/* Spa
 *
 * Copyright © 2019 Wim Taymans
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "resample-native-impl.h"

#include <arm_neon.h>

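/*
 * inner_product_neon() computes a single dot product between the input
 * samples 's' and the filter coefficients 'taps'. The scalar sketch below
 * is not part of the original code and uses the hypothetical name
 * inner_product_c_ref; it only illustrates the value the NEON assembly is
 * expected to produce.
 */
static inline void inner_product_c_ref(float *d, const float *s,
		const float *taps, uint32_t n_taps)
{
	float sum = 0.0f;
	uint32_t i;
	/* plain accumulation; the NEON version below vectorizes this loop */
	for (i = 0; i < n_taps; i++)
		sum += s[i] * taps[i];
	*d = sum;
}
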
static void inner_product_neon(float *d, const float * SPA_RESTRICT s,
		const float * SPA_RESTRICT taps, uint32_t n_taps)
{
	unsigned int remainder = n_taps % 16;
	n_taps = n_taps - remainder;

#ifdef __aarch64__
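	/*
	 * Main loop: 16 taps per iteration into four independent
	 * accumulators (v0-v3); a second loop handles the remaining taps
	 * 4 at a time, then the accumulators are reduced with faddp and
	 * the result is stored as a single float in *d.
	 */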
	asm volatile(
		" cmp %[n_taps], #0\n"
		" bne 1f\n"
		" ld1 {v4.4s}, [%[taps]], #16\n"
		" ld1 {v8.4s}, [%[s]], #16\n"
		" subs %[remainder], %[remainder], #4\n"
		" fmul v0.4s, v4.4s, v8.4s\n"
		" bne 4f\n"
		" b 5f\n"
		"1:"
		" ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[taps]], #64\n"
		" ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%[s]], #64\n"
		" subs %[n_taps], %[n_taps], #16\n"
		" fmul v0.4s, v4.4s, v8.4s\n"
		" fmul v1.4s, v5.4s, v9.4s\n"
		" fmul v2.4s, v6.4s, v10.4s\n"
		" fmul v3.4s, v7.4s, v11.4s\n"
		" beq 3f\n"
		"2:"
		" ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[taps]], #64\n"
		" ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%[s]], #64\n"
		" subs %[n_taps], %[n_taps], #16\n"
		" fmla v0.4s, v4.4s, v8.4s\n"
		" fmla v1.4s, v5.4s, v9.4s\n"
		" fmla v2.4s, v6.4s, v10.4s\n"
		" fmla v3.4s, v7.4s, v11.4s\n"
		" bne 2b\n"
		"3:"
		" fadd v4.4s, v0.4s, v1.4s\n"
		" fadd v5.4s, v2.4s, v3.4s\n"
		" cmp %[remainder], #0\n"
		" fadd v0.4s, v4.4s, v5.4s\n"
		" beq 5f\n"
		"4:"
		" ld1 {v6.4s}, [%[taps]], #16\n"
		" ld1 {v10.4s}, [%[s]], #16\n"
		" subs %[remainder], %[remainder], #4\n"
		" fmla v0.4s, v6.4s, v10.4s\n"
		" bne 4b\n"
		"5:"
		" faddp v0.4s, v0.4s, v0.4s\n"
		" faddp v0.2s, v0.2s, v0.2s\n"
		" str s0, [%[d]]\n"
		: [d] "+r" (d), [s] "+r" (s), [taps] "+r" (taps),
		  [n_taps] "+r" (n_taps), [remainder] "+r" (remainder)
		:
		: "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8",
		  "v9", "v10", "v11");
#else
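	/*
	 * ARMv7 NEON variant of the same loop using q registers. The
	 * ':128' alignment hints require 'taps' to be 16-byte aligned.
	 */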
	asm volatile (
		" cmp %[n_taps], #0\n"
		" bne 1f\n"
		" vld1.32 {q4}, [%[taps] :128]!\n"
		" vld1.32 {q8}, [%[s]]!\n"
		" subs %[remainder], %[remainder], #4\n"
		" vmul.f32 q0, q4, q8\n"
		" bne 4f\n"
		" b 5f\n"
		"1:"
		" vld1.32 {q4, q5}, [%[taps] :128]!\n"
		" vld1.32 {q8, q9}, [%[s]]!\n"
		" vld1.32 {q6, q7}, [%[taps] :128]!\n"
		" vld1.32 {q10, q11}, [%[s]]!\n"
		" subs %[n_taps], %[n_taps], #16\n"
		" vmul.f32 q0, q4, q8\n"
		" vmul.f32 q1, q5, q9\n"
		" vmul.f32 q2, q6, q10\n"
		" vmul.f32 q3, q7, q11\n"
		" beq 3f\n"
		"2:"
		" vld1.32 {q4, q5}, [%[taps] :128]!\n"
		" vld1.32 {q8, q9}, [%[s]]!\n"
		" vld1.32 {q6, q7}, [%[taps] :128]!\n"
		" vld1.32 {q10, q11}, [%[s]]!\n"
		" subs %[n_taps], %[n_taps], #16\n"
		" vmla.f32 q0, q4, q8\n"
		" vmla.f32 q1, q5, q9\n"
		" vmla.f32 q2, q6, q10\n"
		" vmla.f32 q3, q7, q11\n"
		" bne 2b\n"
		"3:"
		" vadd.f32 q4, q0, q1\n"
		" vadd.f32 q5, q2, q3\n"
		" cmp %[remainder], #0\n"
		" vadd.f32 q0, q4, q5\n"
		" beq 5f\n"
		"4:"
		" vld1.32 {q6}, [%[taps] :128]!\n"
		" vld1.32 {q10}, [%[s]]!\n"
		" subs %[remainder], %[remainder], #4\n"
		" vmla.f32 q0, q6, q10\n"
		" bne 4b\n"
		"5:"
		" vadd.f32 d0, d0, d1\n"
		" vpadd.f32 d0, d0, d0\n"
		" vstr d0, [%[d]]\n"
		: [d] "+r" (d), [s] "+r" (s), [taps] "+r" (taps),
		  [n_taps] "+l" (n_taps), [remainder] "+l" (remainder)
		:
		: "cc", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8",
		  "q9", "q10", "q11");
#endif
}

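/*
 * inner_product_ip_neon() evaluates two adjacent filter phases 't0' and
 * 't1' against the same input 's' and linearly interpolates between the two
 * dot products with the fractional position 'x'. The scalar sketch below is
 * not part of the original code and uses the hypothetical name
 * inner_product_ip_c_ref; it only illustrates the intended result.
 */
static inline void inner_product_ip_c_ref(float *d, const float *s,
		const float *t0, const float *t1, float x, uint32_t n_taps)
{
	float sum0 = 0.0f, sum1 = 0.0f;
	uint32_t i;
	for (i = 0; i < n_taps; i++) {
		sum0 += s[i] * t0[i];	/* dot product with phase 0 */
		sum1 += s[i] * t1[i];	/* dot product with phase 1 */
	}
	/* blend the two phases: sum0 + (sum1 - sum0) * x */
	*d = sum0 + (sum1 - sum0) * x;
}
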
static void inner_product_ip_neon(float *d, const float * SPA_RESTRICT s,
		const float * SPA_RESTRICT t0, const float * SPA_RESTRICT t1, float x,
		uint32_t n_taps)
{
#ifdef __aarch64__
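	/*
	 * 8 taps per iteration: v0/v1 accumulate the t0 phase, v2/v3 the
	 * t1 phase; the two partial sums are then blended with x (v10)
	 * and reduced to a single float. The loop assumes n_taps is a
	 * non-zero multiple of 8.
	 */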
	asm volatile(
		" dup v10.4s, %w[x]\n"
		" ld1 {v4.4s, v5.4s}, [%[t0]], #32\n"
		" ld1 {v8.4s, v9.4s}, [%[s]], #32\n"
		" ld1 {v6.4s, v7.4s}, [%[t1]], #32\n"
		" subs %[n_taps], %[n_taps], #8\n"
		" fmul v0.4s, v4.4s, v8.4s\n"
		" fmul v1.4s, v5.4s, v9.4s\n"
		" fmul v2.4s, v6.4s, v8.4s\n"
		" fmul v3.4s, v7.4s, v9.4s\n"
		" beq 3f\n"
		"2:"
		" ld1 {v4.4s, v5.4s}, [%[t0]], #32\n"
		" ld1 {v8.4s, v9.4s}, [%[s]], #32\n"
		" ld1 {v6.4s, v7.4s}, [%[t1]], #32\n"
		" subs %[n_taps], %[n_taps], #8\n"
		" fmla v0.4s, v4.4s, v8.4s\n"
		" fmla v1.4s, v5.4s, v9.4s\n"
		" fmla v2.4s, v6.4s, v8.4s\n"
		" fmla v3.4s, v7.4s, v9.4s\n"
		" bne 2b\n"
		"3:"
		" fadd v0.4s, v0.4s, v1.4s\n" /* sum[0] */
		" fadd v2.4s, v2.4s, v3.4s\n" /* sum[1] */
		" fsub v2.4s, v2.4s, v0.4s\n" /* sum[1] -= sum[0] */
		" fmla v0.4s, v2.4s, v10.4s\n" /* sum[0] += sum[1] * x */
		" faddp v0.4s, v0.4s, v0.4s\n"
		" faddp v0.2s, v0.2s, v0.2s\n"
		" str s0, [%[d]]\n"
		: [d] "+r" (d), [s] "+r" (s), [t0] "+r" (t0), [t1] "+r" (t1),
		  [n_taps] "+r" (n_taps), [x] "+r" (x)
		:
		: "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8",
		  "v9", "v10");
#else
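	/* ARMv7 NEON variant of the same interpolating loop, using q registers */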
	asm volatile(
		" vdup.32 q10, %[x]\n"
		" vld1.32 {q4, q5}, [%[t0] :128]!\n"
		" vld1.32 {q8, q9}, [%[s]]!\n"
		" vld1.32 {q6, q7}, [%[t1] :128]!\n"
		" subs %[n_taps], %[n_taps], #8\n"
		" vmul.f32 q0, q4, q8\n"
		" vmul.f32 q1, q5, q9\n"
		" vmul.f32 q2, q6, q8\n"
		" vmul.f32 q3, q7, q9\n"
		" beq 3f\n"
		"2:"
		" vld1.32 {q4, q5}, [%[t0] :128]!\n"
		" vld1.32 {q8, q9}, [%[s]]!\n"
		" vld1.32 {q6, q7}, [%[t1] :128]!\n"
		" subs %[n_taps], %[n_taps], #8\n"
		" vmla.f32 q0, q4, q8\n"
		" vmla.f32 q1, q5, q9\n"
		" vmla.f32 q2, q6, q8\n"
		" vmla.f32 q3, q7, q9\n"
		" bne 2b\n"
		"3:"
		" vadd.f32 q0, q0, q1\n" /* sum[0] */
		" vadd.f32 q2, q2, q3\n" /* sum[1] */
		" vsub.f32 q2, q2, q0\n" /* sum[1] -= sum[0] */
		" vmla.f32 q0, q2, q10\n" /* sum[0] += sum[1] * x */
		" vadd.f32 d0, d0, d1\n"
		" vpadd.f32 d0, d0, d0\n"
		" vstr d0, [%[d]]\n"
		: [d] "+r" (d), [s] "+r" (s), [t0] "+r" (t0), [t1] "+r" (t1),
		  [n_taps] "+l" (n_taps), [x] "+l" (x)
		:
		: "cc", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8",
		  "q9", "q10");
#endif
}

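/*
 * The MAKE_RESAMPLER_FULL/MAKE_RESAMPLER_INTER macros come from
 * resample-native-impl.h; they are expected to expand to the full and
 * interpolating resampler entry points built on top of the inner-product
 * helpers above.
 */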
MAKE_RESAMPLER_FULL(neon);
MAKE_RESAMPLER_INTER(neon);