;******************************************************************************
;* SIMD-optimized functions for the DCA decoder
;* Copyright (C) 2016 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

%define sizeof_float 4
%define FMA3_OFFSET (8 * cpuflag(fma3))

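; LFE FIR interpolation for decimation factor 64: each int32 LFE input sample
; is expanded into 64 float PCM samples with an 8-tap, 32-phase filter (256
; coefficients).  The first 32 outputs are written forwards via cnt1, the
; mirrored last 32 backwards via cnt2, reusing the same coefficient block.
; Assumed C counterpart (libavcodec/dcadsp.c):
;   void lfe_fir0_float(float *pcm_samples, int32_t *lfe_samples,
;                       const float *filter_coeff, ptrdiff_t npcmblocks);
; nblocks is halved on entry, i.e. one LFE sample per two 32-sample PCM blocks.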
%macro LFE_FIR0_FLOAT 0
cglobal lfe_fir0_float, 4, 6, 12 + cpuflag(fma3)*4, samples, lfe, coeff, nblocks, cnt1, cnt2
    shr nblocksd, 1
    sub     lfeq, 7*sizeof_float
    mov    cnt1d, 32*sizeof_float
    mov    cnt2d, 32*sizeof_float-8-FMA3_OFFSET
    lea   coeffq, [coeffq+cnt1q*8]
    add samplesq, cnt1q
    neg    cnt1q

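; Outer loop: one iteration per LFE sample.  Load lfe[-7..0] as int32 and
; convert to float: m5/m4 keep memory (oldest-first) order for the mirrored
; half, m6/m7 hold the same values reversed (newest-first) for the first half.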
.loop:
%if cpuflag(avx)
    cvtdq2ps  m4, [lfeq+16]
    cvtdq2ps  m5, [lfeq   ]
    shufps    m7, m4, m4, q0123
    shufps    m6, m5, m5, q0123
%elif cpuflag(sse2)
    movu      m4, [lfeq+16]
    movu      m5, [lfeq   ]
    cvtdq2ps  m4, m4
    cvtdq2ps  m5, m5
    pshufd    m7, m4, q0123
    pshufd    m6, m5, q0123
%else
    cvtpi2ps  m4, [lfeq+16]
    cvtpi2ps  m0, [lfeq+24]
    cvtpi2ps  m5, [lfeq   ]
    cvtpi2ps  m1, [lfeq+8 ]
    shufps    m4, m0, q1010
    shufps    m5, m1, q1010
    shufps    m7, m4, m4, q0123
    shufps    m6, m5, m5, q0123
%endif

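; Inner loop: each iteration produces 2 consecutive first-half outputs (4 with
; FMA3) at samplesq+cnt1q and the same number of mirrored second-half outputs
; at samplesq+cnt2q.  Every output is an 8-tap dot product against the
; coefficients at coeffq+cnt1q*8; cnt1 counts up towards zero, cnt2 down.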
.inner_loop:
%if ARCH_X86_64
    movaps    m8, [coeffq+cnt1q*8   ]
    movaps    m9, [coeffq+cnt1q*8+16]
    movaps   m10, [coeffq+cnt1q*8+32]
    movaps   m11, [coeffq+cnt1q*8+48]
%if cpuflag(fma3)
    movaps   m12, [coeffq+cnt1q*8+64]
    movaps   m13, [coeffq+cnt1q*8+80]
    movaps   m14, [coeffq+cnt1q*8+96]
    movaps   m15, [coeffq+cnt1q*8+112]
    mulps     m0, m7, m8
    mulps     m1, m7, m10
    mulps     m2, m7, m12
    mulps     m3, m7, m14
    fmaddps   m0, m6, m9, m0
    fmaddps   m1, m6, m11, m1
    fmaddps   m2, m6, m13, m2
    fmaddps   m3, m6, m15, m3

    haddps    m0, m1
    haddps    m2, m3
    haddps    m0, m2
    movaps [samplesq+cnt1q], m0
%else
    mulps     m0, m7, m8
    mulps     m1, m6, m9
    mulps     m2, m7, m10
    mulps     m3, m6, m11
    addps     m0, m1
    addps     m2, m3

    unpckhps  m3, m0, m2
    unpcklps  m0, m2
    addps     m3, m0
    movhlps   m2, m3
    addps     m2, m3
    movlps [samplesq+cnt1q], m2
%endif
%else ; ARCH_X86_32
%if cpuflag(fma3)
    mulps     m0, m7, [coeffq+cnt1q*8    ]
    mulps     m1, m7, [coeffq+cnt1q*8+32 ]
    mulps     m2, m7, [coeffq+cnt1q*8+64 ]
    mulps     m3, m7, [coeffq+cnt1q*8+96 ]
    fmaddps   m0, m6, [coeffq+cnt1q*8+16 ], m0
    fmaddps   m1, m6, [coeffq+cnt1q*8+48 ], m1
    fmaddps   m2, m6, [coeffq+cnt1q*8+80 ], m2
    fmaddps   m3, m6, [coeffq+cnt1q*8+112], m3

    haddps    m0, m1
    haddps    m2, m3
    haddps    m0, m2
    movaps [samplesq+cnt1q], m0
%else
    mulps     m0, m7, [coeffq+cnt1q*8   ]
    mulps     m1, m6, [coeffq+cnt1q*8+16]
    mulps     m2, m7, [coeffq+cnt1q*8+32]
    mulps     m3, m6, [coeffq+cnt1q*8+48]
    addps     m0, m1
    addps     m2, m3

    unpckhps  m3, m0, m2
    unpcklps  m0, m2
    addps     m3, m0
    movhlps   m2, m3
    addps     m2, m3
    movlps [samplesq+cnt1q], m2
%endif
%endif; ARCH

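; Mirrored second-half outputs: same coefficients, but with the LFE samples in
; the opposite (oldest-first) order, stored backwards through cnt2.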
%if ARCH_X86_64
%if cpuflag(fma3)
    mulps     m8, m5
    mulps    m10, m5
    mulps    m12, m5
    mulps    m14, m5
    fmaddps   m8, m4, m9, m8
    fmaddps  m10, m4, m11, m10
    fmaddps  m12, m4, m13, m12
    fmaddps  m14, m4, m15, m14

    haddps   m10, m8
    haddps   m14, m12
    haddps   m14, m10
    movaps [samplesq+cnt2q], m14
%else
    mulps     m8, m5
    mulps     m9, m4
    mulps    m10, m5
    mulps    m11, m4
    addps     m8, m9
    addps    m10, m11

    unpckhps m11, m10, m8
    unpcklps m10, m8
    addps    m11, m10
    movhlps   m8, m11
    addps     m8, m11
    movlps [samplesq+cnt2q], m8
%endif
%else ; ARCH_X86_32
%if cpuflag(fma3)
    mulps     m0, m5, [coeffq+cnt1q*8    ]
    mulps     m1, m5, [coeffq+cnt1q*8+32 ]
    mulps     m2, m5, [coeffq+cnt1q*8+64 ]
    mulps     m3, m5, [coeffq+cnt1q*8+96 ]
    fmaddps   m0, m4, [coeffq+cnt1q*8+16 ], m0
    fmaddps   m1, m4, [coeffq+cnt1q*8+48 ], m1
    fmaddps   m2, m4, [coeffq+cnt1q*8+80 ], m2
    fmaddps   m3, m4, [coeffq+cnt1q*8+112], m3

    haddps    m1, m0
    haddps    m3, m2
    haddps    m3, m1
    movaps [samplesq+cnt2q], m3
%else
    mulps     m0, m5, [coeffq+cnt1q*8   ]
    mulps     m1, m4, [coeffq+cnt1q*8+16]
    mulps     m2, m5, [coeffq+cnt1q*8+32]
    mulps     m3, m4, [coeffq+cnt1q*8+48]
    addps     m0, m1
    addps     m2, m3

    unpckhps  m3, m2, m0
    unpcklps  m2, m0
    addps     m3, m2
    movhlps   m0, m3
    addps     m0, m3
    movlps [samplesq+cnt2q], m0
%endif
%endif; ARCH

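    ; advance by 8 bytes (2 output samples) per iteration, 16 (4 samples) with FMA3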
    sub    cnt2d, 8 + FMA3_OFFSET
    add    cnt1q, 8 + FMA3_OFFSET
    jl .inner_loop

    add     lfeq, 4
    add samplesq,  64*sizeof_float
    mov    cnt1q, -32*sizeof_float
    mov    cnt2d,  32*sizeof_float-8-FMA3_OFFSET
    sub nblocksd, 1
    jg .loop
    RET
%endmacro

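; lfe_fir0_float is instantiated for sse (x86_32 only, since SSE2 is baseline
; on x86_64), sse2, avx and fma3.  The non-FMA3 paths avoid haddps, which is
; presumably why the baseline version can stop at SSE2.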
%if ARCH_X86_32
INIT_XMM sse
LFE_FIR0_FLOAT
%endif
INIT_XMM sse2
LFE_FIR0_FLOAT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
LFE_FIR0_FLOAT
%endif
%if HAVE_FMA3_EXTERNAL
INIT_XMM fma3
LFE_FIR0_FLOAT
%endif

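; LFE FIR interpolation for decimation factor 128: each int32 LFE input sample
; is expanded into 128 float PCM samples with a 4-tap, 64-phase filter (256
; coefficients), split into a forward first half (cnt1) and a mirrored second
; half (cnt2) just like lfe_fir0_float.  Uses haddps, so SSE3 is the baseline.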
%macro LFE_FIR1_FLOAT 0
cglobal lfe_fir1_float, 4, 6, 10, samples, lfe, coeff, nblocks, cnt1, cnt2
    shr nblocksd, 2
    sub     lfeq, 3*sizeof_float
    mov    cnt1d, 64*sizeof_float
    mov    cnt2d, 64*sizeof_float-16
    lea   coeffq, [coeffq+cnt1q*4]
    add samplesq, cnt1q
    neg    cnt1q

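; Outer loop: one iteration per LFE sample.  Load lfe[-3..0] as int32 and
; convert to float; m4 keeps memory (oldest-first) order for the mirrored
; half, m5 holds the reversed (newest-first) copy for the first half.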
.loop:
%if cpuflag(avx)
    cvtdq2ps  m4, [lfeq]
    shufps    m5, m4, m4, q0123
%elif cpuflag(sse2)
    movu      m4, [lfeq]
    cvtdq2ps  m4, m4
    pshufd    m5, m4, q0123
%endif

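; Inner loop: each iteration produces 4 first-half outputs at samplesq+cnt1q
; and 4 mirrored second-half outputs at samplesq+cnt2q, each a 4-tap dot
; product against the coefficients at coeffq+cnt1q*4.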
.inner_loop:
    movaps    m6, [coeffq+cnt1q*4   ]
    movaps    m7, [coeffq+cnt1q*4+16]
    mulps     m0, m5, m6
    mulps     m1, m5, m7
%if ARCH_X86_64
    movaps    m8, [coeffq+cnt1q*4+32]
    movaps    m9, [coeffq+cnt1q*4+48]
    mulps     m2, m5, m8
    mulps     m3, m5, m9
%else
    mulps     m2, m5, [coeffq+cnt1q*4+32]
    mulps     m3, m5, [coeffq+cnt1q*4+48]
%endif

    haddps    m0, m1
    haddps    m2, m3
    haddps    m0, m2
    movaps [samplesq+cnt1q], m0

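    ; mirrored second-half outputs: same coefficients, oldest-first sample order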
    mulps     m6, m4
    mulps     m7, m4
%if ARCH_X86_64
    mulps     m8, m4
    mulps     m9, m4

    haddps    m6, m7
    haddps    m8, m9
    haddps    m6, m8
%else
    mulps     m2, m4, [coeffq+cnt1q*4+32]
    mulps     m3, m4, [coeffq+cnt1q*4+48]

    haddps    m6, m7
    haddps    m2, m3
    haddps    m6, m2
%endif
    movaps [samplesq+cnt2q], m6

    sub    cnt2d, 16
    add    cnt1q, 16
    jl .inner_loop

    add     lfeq, sizeof_float
    add samplesq, 128*sizeof_float
    mov    cnt1q, -64*sizeof_float
    mov    cnt2d,  64*sizeof_float-16
    sub nblocksd, 1
    jg .loop
    RET
%endmacro

INIT_XMM sse3
LFE_FIR1_FLOAT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
LFE_FIR1_FLOAT
%endif