/* Spa
 *
 * Copyright © 2018 Wim Taymans
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "channelmix-ops.h"

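/* n -> n: zero, plain copy, or per-channel volume from the matrix diagonal */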
void
channelmix_copy_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **)dst;
	const float **s = (const float **)src;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_IDENTITY)) {
		for (i = 0; i < n_dst; i++)
			spa_memcpy(d[i], s[i], n_samples * sizeof(float));
	}
	else {
		for (i = 0; i < n_dst; i++) {
			for (n = 0; n < n_samples; n++)
				d[i][n] = s[i][n] * mix->matrix[i][i];
		}
	}
}

#define _M(ch)		(1UL << SPA_AUDIO_CHANNEL_ ## ch)

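/* any -> any: full n_dst x n_src matrix multiply, with optional LR4 filter per output channel */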
void
channelmix_f32_n_m_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, j, n;
	float **d = (float **) dst;
	const float **s = (const float **) src;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_COPY)) {
		uint32_t copy = SPA_MIN(n_dst, n_src);
		for (i = 0; i < copy; i++)
			spa_memcpy(d[i], s[i], n_samples * sizeof(float));
		for (; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			for (i = 0; i < n_dst; i++) {
				float sum = 0.0f;
				for (j = 0; j < n_src; j++)
					sum += s[j][n] * mix->matrix[i][j];
				d[i][n] = sum;
			}
		}
		for (i = 0; i < n_dst; i++) {
			if (mix->lr4_info[i] > 0)
				lr4_process(&mix->lr4[i], d[i], n_samples);
		}
	}
}

#define MASK_MONO	_M(FC)|_M(MONO)|_M(UNKNOWN)
#define MASK_STEREO	_M(FL)|_M(FR)|_M(UNKNOWN)

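/* MONO -> FL+FR */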
void
channelmix_f32_1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][0];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
		memset(d[1], 0, n_samples * sizeof(float));
	} else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_EQUAL)) {
		if (v0 == 1.0f) {
			for (n = 0; n < n_samples; n++)
				d[0][n] = d[1][n] = s[0][n];
		} else {
			for (n = 0; n < n_samples; n++)
				d[0][n] = d[1][n] = s[0][n] * v0;
		}
	} else {
		for (n = 0; n < n_samples; n++) {
			d[0][n] = s[0][n] * v0;
			d[1][n] = s[0][n] * v1;
		}
	}
}

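/* FL+FR -> MONO */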
void
channelmix_f32_2_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[0][1];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
	} else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_EQUAL)) {
		for (n = 0; n < n_samples; n++)
			d[0][n] = (s[0][n] + s[1][n]) * v0;
	}
	else {
		for (n = 0; n < n_samples; n++)
			d[0][n] = s[0][n] * v0 + s[1][n] * v1;
	}
}

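/* FL+FR+RL+RR -> MONO */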
void
channelmix_f32_4_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[0][1];
	const float v2 = mix->matrix[0][2];
	const float v3 = mix->matrix[0][3];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
	}
	else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_EQUAL)) {
		for (n = 0; n < n_samples; n++)
			d[0][n] = (s[0][n] + s[1][n] + s[2][n] + s[3][n]) * v0;
	}
	else {
		for (n = 0; n < n_samples; n++)
			d[0][n] = s[0][n] * v0 + s[1][n] * v1 +
				s[2][n] * v2 + s[3][n] * v3;
	}
}

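/* FL+FR+FC+LFE -> MONO */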
void
channelmix_f32_3p1_1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[0][1];
	const float v2 = mix->matrix[0][2];
	const float v3 = mix->matrix[0][3];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
	}
	else if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_EQUAL)) {
		for (n = 0; n < n_samples; n++)
			d[0][n] = (s[0][n] + s[1][n] + s[2][n] + s[3][n]) * v0;
	}
	else {
		/* include the LFE term so this matches the EQUAL path, which sums all four inputs */
		for (n = 0; n < n_samples; n++)
			d[0][n] = s[0][n] * v0 + s[1][n] * v1 +
				s[2][n] * v2 + s[3][n] * v3;
	}
}


#define MASK_QUAD	_M(FL)|_M(FR)|_M(RL)|_M(RR)|_M(UNKNOWN)

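/* FL+FR -> FL+FR+RL+RR */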
void
channelmix_f32_2_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v2 = mix->matrix[2][0];
	const float v3 = mix->matrix[3][1];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else if (v0 == v2 && v1 == v3) {
		if (v0 == 1.0f && v1 == 1.0f) {
			for (n = 0; n < n_samples; n++) {
				d[0][n] = d[2][n] = s[0][n];
				d[1][n] = d[3][n] = s[1][n];
			}
		} else {
			for (n = 0; n < n_samples; n++) {
				d[0][n] = d[2][n] = s[0][n] * v0;
				d[1][n] = d[3][n] = s[1][n] * v1;
			}
		}
	}
	else {
		for (n = 0; n < n_samples; n++) {
			d[0][n] = s[0][n] * v0;
			d[1][n] = s[1][n] * v1;
			d[2][n] = s[0][n] * v2;
			d[3][n] = s[1][n] * v3;
		}
	}
}

#define MASK_3_1	_M(FL)|_M(FR)|_M(FC)|_M(LFE)
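/* FL+FR -> FL+FR+FC+LFE */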
void
channelmix_f32_2_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v2 = (mix->matrix[2][0] + mix->matrix[2][1]) * 0.5f;
	const float v3 = (mix->matrix[3][0] + mix->matrix[3][1]) * 0.5f;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else if (v0 == 1.0f && v1 == 1.0f) {
		for (n = 0; n < n_samples; n++) {
			float c = s[0][n] + s[1][n];
			d[0][n] = s[0][n];
			d[1][n] = s[1][n];
			d[2][n] = c * v2;
			d[3][n] = c * v3;
		}
		if (v3 > 0.0f)
			lr4_process(&mix->lr4[3], d[3], n_samples);
	}
	else {
		for (n = 0; n < n_samples; n++) {
			float c = s[0][n] + s[1][n];
			d[0][n] = s[0][n] * v0;
			d[1][n] = s[1][n] * v1;
			d[2][n] = c * v2;
			d[3][n] = c * v3;
		}
		if (v3 > 0.0f)
			lr4_process(&mix->lr4[3], d[3], n_samples);
	}
}

#define MASK_5_1	_M(FL)|_M(FR)|_M(FC)|_M(LFE)|_M(SL)|_M(SR)|_M(RL)|_M(RR)
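/* FL+FR -> FL+FR+FC+LFE+SL+SR */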
void
channelmix_f32_2_5p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **)dst;
	const float **s = (const float **)src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v2 = (mix->matrix[2][0] + mix->matrix[2][1]) * 0.5f;
	const float v3 = (mix->matrix[3][0] + mix->matrix[3][1]) * 0.5f;
	const float v4 = mix->matrix[4][0];
	const float v5 = mix->matrix[5][1];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else if (v0 == 1.0f && v1 == 1.0f && v4 == 1.0f && v5 == 1.0f) {
		for (n = 0; n < n_samples; n++) {
			float c = s[0][n] + s[1][n];
			d[0][n] = d[4][n] = s[0][n];
			d[1][n] = d[5][n] = s[1][n];
			d[2][n] = c * v2;
			d[3][n] = c * v3;
		}
		if (v3 > 0.0f)
			lr4_process(&mix->lr4[3], d[3], n_samples);
	}
	else {
		for (n = 0; n < n_samples; n++) {
			float c = s[0][n] + s[1][n];
			d[0][n] = s[0][n] * v0;
			d[1][n] = s[1][n] * v1;
			d[2][n] = c * v2;
			d[3][n] = c * v3;
			d[4][n] = s[0][n] * v4;
			d[5][n] = s[1][n] * v5;
		}
		if (v3 > 0.0f)
			lr4_process(&mix->lr4[3], d[3], n_samples);
	}
}

/* FL+FR+FC+LFE+SL+SR -> FL+FR */
void
channelmix_f32_5p1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float clev = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
	const float llev = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;
	const float slev0 = mix->matrix[0][4];
	const float slev1 = mix->matrix[1][5];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
		memset(d[1], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			const float ctr = clev * s[2][n] + llev * s[3][n];
			d[0][n] = s[0][n] * v0 + ctr + (slev0 * s[4][n]);
			d[1][n] = s[1][n] * v1 + ctr + (slev1 * s[5][n]);
		}
	}
}

/* FL+FR+FC+LFE+SL+SR -> FL+FR+FC+LFE */
void
channelmix_f32_5p1_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v2 = mix->matrix[2][2];
	const float v3 = mix->matrix[3][3];
	const float v4 = mix->matrix[0][4];
	const float v5 = mix->matrix[1][5];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			d[0][n] = s[0][n] * v0 + s[4][n] * v4;
			d[1][n] = s[1][n] * v1 + s[5][n] * v5;
			d[2][n] = s[2][n] * v2;
			d[3][n] = s[3][n] * v3;
		}
	}
}

/* FL+FR+FC+LFE+SL+SR -> FL+FR+RL+RR */
void
channelmix_f32_5p1_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float clev = mix->matrix[0][2];
	const float llev = mix->matrix[0][3];
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v4 = mix->matrix[2][4];
	const float v5 = mix->matrix[3][5];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			const float ctr = s[2][n] * clev + s[3][n] * llev;
			d[0][n] = s[0][n] * v0 + ctr;
			d[1][n] = s[1][n] * v1 + ctr;
			d[2][n] = s[4][n] * v4;
			d[3][n] = s[5][n] * v5;
		}
	}
}

#define MASK_7_1	_M(FL)|_M(FR)|_M(FC)|_M(LFE)|_M(SL)|_M(SR)|_M(RL)|_M(RR)

/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR */
void
channelmix_f32_7p1_2_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float clev = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
	const float llev = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;
	const float slev0 = mix->matrix[0][4];
	const float slev1 = mix->matrix[1][5];
	const float rlev0 = mix->matrix[0][6];
	const float rlev1 = mix->matrix[1][7];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		memset(d[0], 0, n_samples * sizeof(float));
		memset(d[1], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			const float ctr = clev * s[2][n] + llev * s[3][n];
			d[0][n] = s[0][n] * v0 + ctr + s[4][n] * slev0 + s[6][n] * rlev0;
			d[1][n] = s[1][n] * v1 + ctr + s[5][n] * slev1 + s[7][n] * rlev1;
		}
	}
}

/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR+FC+LFE */
void
channelmix_f32_7p1_3p1_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float v2 = mix->matrix[2][2];
	const float v3 = mix->matrix[3][3];
	const float v4 = (mix->matrix[0][4] + mix->matrix[0][6]) * 0.5f;
	const float v5 = (mix->matrix[1][5] + mix->matrix[1][7]) * 0.5f;

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			d[0][n] = s[0][n] * v0 + (s[4][n] + s[6][n]) * v4;
			d[1][n] = s[1][n] * v1 + (s[5][n] + s[7][n]) * v5;
			d[2][n] = s[2][n] * v2;
			d[3][n] = s[3][n] * v3;
		}
	}
}

/* FL+FR+FC+LFE+SL+SR+RL+RR -> FL+FR+RL+RR */
void
channelmix_f32_7p1_4_c(struct channelmix *mix, uint32_t n_dst, void * SPA_RESTRICT dst[n_dst],
		   uint32_t n_src, const void * SPA_RESTRICT src[n_src], uint32_t n_samples)
{
	uint32_t i, n;
	float **d = (float **) dst;
	const float **s = (const float **) src;
	const float v0 = mix->matrix[0][0];
	const float v1 = mix->matrix[1][1];
	const float clev = (mix->matrix[0][2] + mix->matrix[1][2]) * 0.5f;
	const float llev = (mix->matrix[0][3] + mix->matrix[1][3]) * 0.5f;
	const float slev0 = mix->matrix[2][4];
	const float slev1 = mix->matrix[3][5];
	const float rlev0 = mix->matrix[2][6];
	const float rlev1 = mix->matrix[3][7];

	if (SPA_FLAG_IS_SET(mix->flags, CHANNELMIX_FLAG_ZERO)) {
		for (i = 0; i < n_dst; i++)
			memset(d[i], 0, n_samples * sizeof(float));
	}
	else {
		for (n = 0; n < n_samples; n++) {
			const float ctr = s[2][n] * clev + s[3][n] * llev;
			const float sl = s[4][n] * slev0;
			const float sr = s[5][n] * slev1;
			d[0][n] = s[0][n] * v0 + ctr + sl;
			d[1][n] = s[1][n] * v1 + ctr + sr;
			d[2][n] = s[6][n] * rlev0 + sl;
			d[3][n] = s[7][n] * rlev1 + sr;
		}
	}
}