/* Spa
 *
 * Copyright © 2019 Wim Taymans
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <string.h>
#include <stdio.h>
#include <math.h>

#include <spa/utils/defs.h>

#include "mix-ops.h"

#include <immintrin.h>
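
/* Sum three source buffers into dst in place (dst += src0 + src1 + src2),
 * mixing four streams in total. When all four pointers are 32-byte aligned,
 * the unrolled AVX loop processes 16 floats per iteration; a scalar SSE
 * tail loop handles the remainder. */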
static inline void mix_4(float * dst,
		const float * SPA_RESTRICT src0,
		const float * SPA_RESTRICT src1,
		const float * SPA_RESTRICT src2,
		uint32_t n_samples)
{
	uint32_t n, unrolled;

	if (SPA_IS_ALIGNED(src0, 32) &&
	    SPA_IS_ALIGNED(src1, 32) &&
	    SPA_IS_ALIGNED(src2, 32) &&
	    SPA_IS_ALIGNED(dst, 32))
		unrolled = n_samples & ~15;
	else
		unrolled = 0;

	for (n = 0; n < unrolled; n += 16) {
		__m256 in1[4], in2[4];

		in1[0] = _mm256_load_ps(&dst[n + 0]);
		in2[0] = _mm256_load_ps(&dst[n + 8]);
		in1[1] = _mm256_load_ps(&src0[n + 0]);
		in2[1] = _mm256_load_ps(&src0[n + 8]);
		in1[2] = _mm256_load_ps(&src1[n + 0]);
		in2[2] = _mm256_load_ps(&src1[n + 8]);
		in1[3] = _mm256_load_ps(&src2[n + 0]);
		in2[3] = _mm256_load_ps(&src2[n + 8]);

		in1[0] = _mm256_add_ps(in1[0], in1[1]);
		in2[0] = _mm256_add_ps(in2[0], in2[1]);
		in1[2] = _mm256_add_ps(in1[2], in1[3]);
		in2[2] = _mm256_add_ps(in2[2], in2[3]);
		in1[0] = _mm256_add_ps(in1[0], in1[2]);
		in2[0] = _mm256_add_ps(in2[0], in2[2]);

		_mm256_store_ps(&dst[n + 0], in1[0]);
		_mm256_store_ps(&dst[n + 8], in2[0]);
	}
	for (; n < n_samples; n++) {
		__m128 in[4];
		in[0] = _mm_load_ss(&dst[n]);
		in[1] = _mm_load_ss(&src0[n]);
		in[2] = _mm_load_ss(&src1[n]);
		in[3] = _mm_load_ss(&src2[n]);
		in[0] = _mm_add_ss(in[0], in[1]);
		in[2] = _mm_add_ss(in[2], in[3]);
		in[0] = _mm_add_ss(in[0], in[2]);
		_mm_store_ss(&dst[n], in[0]);
	}
}
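
/* Sum one source buffer into dst in place (dst += src), mixing two streams.
 * The AVX loop again requires 32-byte alignment and processes 16 floats per
 * iteration. */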
static inline void mix_2(float * dst, const float * SPA_RESTRICT src, uint32_t n_samples)
{
	uint32_t n, unrolled;

	if (SPA_IS_ALIGNED(src, 32) &&
	    SPA_IS_ALIGNED(dst, 32))
		unrolled = n_samples & ~15;
	else
		unrolled = 0;

	for (n = 0; n < unrolled; n += 16) {
		__m256 in1[2], in2[2];

		in1[0] = _mm256_load_ps(&dst[n + 0]);
		in1[1] = _mm256_load_ps(&dst[n + 8]);
		in2[0] = _mm256_load_ps(&src[n + 0]);
		in2[1] = _mm256_load_ps(&src[n + 8]);

		in1[0] = _mm256_add_ps(in1[0], in2[0]);
		in1[1] = _mm256_add_ps(in1[1], in2[1]);

		_mm256_store_ps(&dst[n + 0], in1[0]);
		_mm256_store_ps(&dst[n + 8], in1[1]);
	}
	for (; n < n_samples; n++) {
		__m128 in1[1], in2[1];
		in1[0] = _mm_load_ss(&dst[n]);
		in2[0] = _mm_load_ss(&src[n]);
		in1[0] = _mm_add_ss(in1[0], in2[0]);
		_mm_store_ss(&dst[n], in1[0]);
	}
}
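
/* Mix n_src source buffers into dst: initialize dst from src[0] (or zero it
 * when there are no sources), then accumulate the remaining sources three at
 * a time with mix_4() and one at a time with mix_2(). */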
void
mix_f32_avx(struct mix_ops *ops, void * SPA_RESTRICT dst, const void * SPA_RESTRICT src[],
		uint32_t n_src, uint32_t n_samples)
{
	uint32_t i;

	/* the buffers hold n_samples frames of n_channels floats each; scale
	 * once here so every helper below sees the raw float count */
	n_samples *= ops->n_channels;

	if (n_src == 0)
		memset(dst, 0, n_samples * sizeof(float));
	else if (dst != src[0])
		memcpy(dst, src[0], n_samples * sizeof(float));

	for (i = 1; i + 2 < n_src; i += 3)
		mix_4(dst, src[i], src[i + 1], src[i + 2], n_samples);
	for (; i < n_src; i++)
		mix_2(dst, src[i], n_samples);
}
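
/*
 * Illustrative usage sketch, not part of the upstream file: mix two constant
 * buffers and check one output value. Compile with -DMIX_OPS_AVX_EXAMPLE
 * -mavx (C11, for aligned_alloc) to build it. Calling mix_f32_avx() directly
 * on a zero-initialized struct mix_ops is an assumption made for this demo;
 * real callers go through the ops table that mix-ops.h sets up.
 */
#ifdef MIX_OPS_AVX_EXAMPLE
#include <stdlib.h>

int main(void)
{
	struct mix_ops ops = { 0 };
	uint32_t i, n_samples = 64, n_floats;
	float *dst, *a, *b;
	const void *src[2];

	ops.n_channels = 2;
	n_floats = n_samples * ops.n_channels;

	/* 32-byte alignment lets the mix loops take their unrolled AVX path */
	dst = aligned_alloc(32, n_floats * sizeof(float));
	a = aligned_alloc(32, n_floats * sizeof(float));
	b = aligned_alloc(32, n_floats * sizeof(float));
	if (!dst || !a || !b)
		return 1;

	for (i = 0; i < n_floats; i++) {
		a[i] = 1.0f;
		b[i] = 2.0f;
	}
	src[0] = a;
	src[1] = b;

	/* src[0] is copied into dst, then src[1] is accumulated via mix_2() */
	mix_f32_avx(&ops, dst, src, 2, n_samples);

	printf("dst[0] = %f\n", dst[0]); /* expected: 3.000000 */

	free(dst);
	free(a);
	free(b);
	return 0;
}
#endif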