/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#if defined(FFTW_LDOUBLE) || defined(FFTW_QUAD)
#  error "SSE/SSE2 only works in single/double precision"
#endif

#ifdef FFTW_SINGLE
#  define DS(d,s) s /* single-precision option */
#  define SUFF(name) name ## s
#else
#  define DS(d,s) d /* double-precision option */
#  define SUFF(name) name ## d
#endif

#define SIMD_SUFFIX  _sse2  /* for renaming */
#define VL DS(1,2)         /* SIMD vector length, in terms of complex numbers */
#define SIMD_VSTRIDE_OKA(x) DS(1,((x) == 2))
#define SIMD_STRIDE_OKPAIR SIMD_STRIDE_OK

#if defined(__GNUC__) && !defined(FFTW_SINGLE) && !defined(__SSE2__)
#  error "compiling simd-sse2.h in double precision without -msse2"
#elif defined(__GNUC__) && defined(FFTW_SINGLE) && !defined(__SSE__)
#  error "compiling simd-sse2.h in single precision without -msse"
#endif

#ifdef _MSC_VER
#ifndef inline
#define inline __inline
#endif
#endif

/* some versions of glibc's sys/cdefs.h define __inline to be empty,
   which is wrong because emmintrin.h defines several inline
   procedures */
#ifndef _MSC_VER
#undef __inline
#endif

#ifdef FFTW_SINGLE
#  include <xmmintrin.h>
#else
#  include <emmintrin.h>
#endif

typedef DS(__m128d,__m128) V;
#define VADD SUFF(_mm_add_p)
#define VSUB SUFF(_mm_sub_p)
#define VMUL SUFF(_mm_mul_p)
#define VXOR SUFF(_mm_xor_p)
#define SHUF SUFF(_mm_shuffle_p)
#define UNPCKL SUFF(_mm_unpacklo_p)
#define UNPCKH SUFF(_mm_unpackhi_p)

#define SHUFVALS(fp0,fp1,fp2,fp3) \
   (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
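
/* SHUFVALS packs four 2-bit lane selectors into the immediate operand
   of _mm_shuffle_ps: the result takes elements fp0 and fp1 from the
   first operand and fp2 and fp3 from the second.  For example, with a
   vector x = {a, b, c, d},

	SHUF(x, x, SHUFVALS(1, 0, 3, 2))   yields   {b, a, d, c},

   swapping each adjacent (re, im) pair; FLIP_RI below relies on
   exactly this pattern. */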

#define VDUPL(x) DS(UNPCKL(x, x), SHUF(x, x, SHUFVALS(0, 0, 2, 2)))
#define VDUPH(x) DS(UNPCKH(x, x), SHUF(x, x, SHUFVALS(1, 1, 3, 3)))
#define STOREH(a, v) DS(_mm_storeh_pd(a, v), _mm_storeh_pi((__m64 *)(a), v))
#define STOREL(a, v) DS(_mm_storel_pd(a, v), _mm_storel_pi((__m64 *)(a), v))
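
/* With complex numbers stored as interleaved (re, im) pairs, VDUPL
   broadcasts the real part of each complex element and VDUPH the
   imaginary part: in double precision {r, i} becomes {r, r} resp.
   {i, i}; in single precision {r0, i0, r1, i1} becomes {r0, r0, r1, r1}
   resp. {i0, i0, i1, i1}.  STOREL/STOREH store the low/high 64 bits of
   a vector (one complex float, or the real resp. imaginary double) to
   a possibly unaligned address. */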


#ifdef __GNUC__
  /*
   * gcc-3.3 generates slow code for _mm_set_ps (write all elements to
   * the stack and load __m128 from the stack).
   *
   * gcc-3.[34] generates slow code for _mm_set_ps1 (load into low element
   * and shuffle).
   *
   * This hack forces gcc to generate a constant __m128 at compile time.
   */
  union rvec {
       R r[DS(2,4)];
       V v;
  };

#  ifdef FFTW_SINGLE
#    define DVK(var, val) V var = __extension__ ({ \
         static const union rvec _var = { {val,val,val,val} }; _var.v; })
#  else
#    define DVK(var, val) V var = __extension__ ({ \
         static const union rvec _var = { {val,val} }; _var.v; })
#  endif
#  define LDK(x) x
#else
#  define DVK(var, val) const R var = K(val)
#  define LDK(x) DS(_mm_set1_pd,_mm_set_ps1)(x)
#endif
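
/* Usage sketch: a codelet declares a constant once with DVK and wraps
   each use in LDK, so that both definitions above work transparently,
   e.g. (hypothetical excerpt):

	DVK(KP707106781, +0.707106781186547524400844362104849039);
	...
	V y = VMUL(LDK(KP707106781), x);

   With gcc, LDK is the identity and the constant is a static vector
   built at compile time; otherwise the scalar constant is broadcast at
   run time via _mm_set1_pd/_mm_set_ps1. */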

static inline V LDA(const R *x, INT ivs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ivs; /* UNUSED */
     return *(const V *)x;
}

static inline void STA(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     (void)ovs; /* UNUSED */
     *(V *)x = v;
}

#ifdef FFTW_SINGLE

#  ifdef _MSC_VER
     /* Temporarily disable the warning "uninitialized local variable
	'name' used" and the runtime checks for use of a variable before
	initialization, which are erroneously triggered by the LOADL0 /
	LOADH macros below, since each of them writes only half of VAL. */
#    ifndef __INTEL_COMPILER
#      pragma warning(disable : 4700)
#      pragma runtime_checks("u", off)
#    endif
#  endif
#  ifdef __INTEL_COMPILER
#    pragma warning(disable : 592)
#  endif

static inline V LD(const R *x, INT ivs, const R *aligned_like)
{
     V var;
     (void)aligned_like; /* UNUSED */
#  ifdef __GNUC__
     /* We use inline asm because gcc-3.x generates slow code for
	_mm_loadh_pi().  gcc-3.x insists upon having an existing variable for
	VAL, which is however never used.  Thus, it generates code to move
	values in and out the variable.  Worse still, gcc-4.0 stores VAL on
	the stack, causing valgrind to complain about uninitialized reads. */
     __asm__("movlps %1, %0\n\tmovhps %2, %0"
	     : "=x"(var) : "m"(x[0]), "m"(x[ivs]));
#  else
#    define LOADH(addr, val) _mm_loadh_pi(val, (const __m64 *)(addr))
#    define LOADL0(addr, val) _mm_loadl_pi(val, (const __m64 *)(addr))
     var = LOADL0(x, var);
     var = LOADH(x + ivs, var);
#  endif
     return var;
}
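
/* Thus, in single precision, LD gathers two complex numbers spaced ivs
   reals apart: x[0], x[1] land in the low half of the vector and
   x[ivs], x[ivs+1] in the high half. */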

#  ifdef _MSC_VER
#    ifndef __INTEL_COMPILER
#      pragma warning(default : 4700)
#      pragma runtime_checks("u", restore)
#    endif
#  endif
#  ifdef __INTEL_COMPILER
#    pragma warning(default : 592)
#  endif

static inline void ST(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     /* WARNING: the extra_iter hack depends upon STOREL occurring
	after STOREH */
     STOREH(x + ovs, v);
     STOREL(x, v);
}

#else /* ! FFTW_SINGLE */
#  define LD LDA
#  define ST STA
#endif

#define STM2 DS(STA,ST)
#define STN2(x, v0, v1, ovs) /* nop */

#ifdef FFTW_SINGLE
#  define STM4(x, v, ovs, aligned_like) /* no-op */
/* STN4 is a macro, not a function, thanks to Visual C++ developers
   deciding "it would be infrequent that people would want to pass more
   than 3 [__m128 parameters] by value."  3 parameters ought to be enough
   for anybody. */
#  define STN4(x, v0, v1, v2, v3, ovs)			\
{							\
     V xxx0, xxx1, xxx2, xxx3;				\
     xxx0 = UNPCKL(v0, v2);				\
     xxx1 = UNPCKH(v0, v2);				\
     xxx2 = UNPCKL(v1, v3);				\
     xxx3 = UNPCKH(v1, v3);				\
     STA(x, UNPCKL(xxx0, xxx2), 0, 0);			\
     STA(x + ovs, UNPCKH(xxx0, xxx2), 0, 0);		\
     STA(x + 2 * ovs, UNPCKL(xxx1, xxx3), 0, 0);	\
     STA(x + 3 * ovs, UNPCKH(xxx1, xxx3), 0, 0);	\
}
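
/* The UNPCK rounds above perform the classic SSE 4x4 transpose:
   writing vj = {vj0, vj1, vj2, vj3}, the store to x + k * ovs receives
   {v0k, v1k, v2k, v3k}, i.e. the k-th element of each input vector. */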
#else /* !FFTW_SINGLE */
static inline void STM4(R *x, V v, INT ovs, const R *aligned_like)
{
     (void)aligned_like; /* UNUSED */
     STOREL(x, v);
     STOREH(x + ovs, v);
}
#  define STN4(x, v0, v1, v2, v3, ovs) /* nothing */
#endif

static inline V FLIP_RI(V x)
{
     return SHUF(x, x, DS(1, SHUFVALS(1, 0, 3, 2)));
}

static inline V VCONJ(V x)
{
     /* This will produce -0.0f (or -0.0d) even on broken
        compilers that do not distinguish +0.0 from -0.0.
        I bet some are still around. */
     union uvec {
          unsigned u[4];
          V v;
     };
     /* it looks like gcc-3.3.5 produces slow code unless PM is
        declared static. */
     static const union uvec pm = {
#ifdef FFTW_SINGLE
          { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }
#else
          { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }
#endif
     };
     return VXOR(pm.v, x);
}

static inline V VBYI(V x)
{
     x = VCONJ(x);
     x = FLIP_RI(x);
     return x;
}
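
/* VBYI(x) computes i*x: VCONJ turns a + bi into a - bi, and FLIP_RI
   then swaps the two halves of each pair, yielding -b + ai = i*(a + bi). */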

/* FMA support */
#define VFMA(a, b, c) VADD(c, VMUL(a, b))
#define VFNMS(a, b, c) VSUB(c, VMUL(a, b))
#define VFMS(a, b, c) VSUB(VMUL(a, b), c)
#define VFMAI(b, c) VADD(c, VBYI(b))
#define VFNMSI(b, c) VSUB(c, VBYI(b))
#define VFMACONJ(b,c)  VADD(VCONJ(b),c)
#define VFMSCONJ(b,c)  VSUB(VCONJ(b),c)
#define VFNMSCONJ(b,c) VSUB(c, VCONJ(b))
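
/* SSE/SSE2 has no fused multiply-add, so the FMA macros expand to
   separate multiplies and adds:

	VFMA(a, b, c)  = a*b + c
	VFMS(a, b, c)  = a*b - c
	VFNMS(a, b, c) = c - a*b

   and the I/CONJ variants fold in a multiplication by i or a complex
   conjugation of b, using VBYI/VCONJ above. */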

static inline V VZMUL(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}
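
/* VZMUL computes the elementwise complex product t*s: with
   t = tr + i*ti (tr, ti broadcast by VDUPL/VDUPH) and s = sr,

	t*s = tr*s + ti*(i*s) = VFMA(ti, VBYI(sr), VMUL(sr, tr)).

   VZMULJ uses VFNMS instead to obtain conj(t)*s, and VZMULI/VZMULIJ
   below produce i*(t*s) and i*(conj(t)*s), respectively. */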

static inline V VZMULJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     tr = VMUL(sr, tr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}

static inline V VZMULI(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMS(tr, sr, ti);
}

static inline V VZMULIJ(V tx, V sr)
{
     V tr = VDUPL(tx);
     V ti = VDUPH(tx);
     ti = VMUL(ti, sr);
     sr = VBYI(sr);
     return VFMA(tr, sr, ti);
}

/* twiddle storage #1: compact, slower */
#ifdef FFTW_SINGLE
#  define VTW1(v,x)  \
  {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
static inline V BYTW1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFMA(ti, sr, tr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V tx = twp[0];
     V tr = UNPCKL(tx, tx);
     V ti = UNPCKH(tx, tx);
     tr = VMUL(tr, sr);
     sr = VBYI(sr);
     return VFNMS(ti, sr, tr);
}
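
/* In single precision one VTW1 vector holds {c0, c1, s0, s1} for two
   consecutive twiddle factors t_k = c_k + i*s_k, so UNPCKL/UNPCKH
   recover the broadcast forms {c0, c0, c1, c1} and {s0, s0, s1, s1}
   directly, and the multiplication then proceeds as in VZMUL/VZMULJ. */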
#else /* !FFTW_SINGLE */
#  define VTW1(v,x) {TW_CEXP, v, x}
static inline V BYTW1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMUL(tx, sr);
}
static inline V BYTWJ1(const R *t, V sr)
{
     V tx = LD(t, 1, t);
     return VZMULJ(tx, sr);
}
#endif
#define TWVL1 (VL)

/* twiddle storage #2: twice the space, faster (when in cache) */
#ifdef FFTW_SINGLE
#  define VTW2(v,x)							\
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+1, x},	\
  {TW_SIN, v, -x}, {TW_SIN, v, x}, {TW_SIN, v+1, -x}, {TW_SIN, v+1, x}
#else /* !FFTW_SINGLE */
#  define VTW2(v,x)							\
  {TW_COS, v, x}, {TW_COS, v, x}, {TW_SIN, v, -x}, {TW_SIN, v, x}
#endif
#define TWVL2 (2 * VL)
static inline V BYTW2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFMA(tr, sr, VMUL(ti, si));
}
static inline V BYTWJ2(const R *t, V sr)
{
     const V *twp = (const V *)t;
     V si = FLIP_RI(sr);
     V tr = twp[0], ti = twp[1];
     return VFNMS(ti, si, VMUL(tr, sr));
}
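
/* Storage #2 keeps the twiddles pre-splatted: twp[0] holds the
   duplicated cosines and twp[1] the sines with alternating signs
   ({-s, s} per complex element), so BYTW2/BYTWJ2 need no unpacking of
   the twiddle data, only FLIP_RI on sr, at the cost of twice the
   memory:

	tr*sr + ti*FLIP_RI(sr) = (c*re - s*im, c*im + s*re) = t*s. */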

/* twiddle storage #3 */
#ifdef FFTW_SINGLE
#  define VTW3(v,x) {TW_CEXP, v, x}, {TW_CEXP, v+1, x}
#  define TWVL3 (VL)
#else
#  define VTW3(v,x) VTW1(v,x)
#  define TWVL3 TWVL1
#endif

/* twiddle storage for split arrays */
#ifdef FFTW_SINGLE
#  define VTWS(v,x)							  \
    {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_COS, v+2, x}, {TW_COS, v+3, x}, \
    {TW_SIN, v, x}, {TW_SIN, v+1, x}, {TW_SIN, v+2, x}, {TW_SIN, v+3, x}
#else
#  define VTWS(v,x)							  \
    {TW_COS, v, x}, {TW_COS, v+1, x}, {TW_SIN, v, x}, {TW_SIN, v+1, x}
#endif
#define TWVLS (2 * VL)

#define VLEAVE() /* nothing */

#include "simd-common.h"