/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu Dec 10 07:04:53 EST 2020 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_notw_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 12 -name n2fv_12 -with-ostride 2 -include dft/simd/n2f.h -store-multiple 2 */

/*
 * This function contains 48 FP additions, 20 FP multiplications,
 * (or, 30 additions, 2 multiplications, 18 fused multiply/add),
 * 33 stack variables, 2 constants, and 30 memory accesses
 */
#include "dft/simd/n2f.h"

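/*
 * Editorial note (not part of the generated output): this codelet computes a
 * forward complex DFT of size 12 over SIMD vectors.  Following FFTW's usual
 * no-twiddle ("notw") codelet convention, ri/ii and ro/io point to the real
 * and imaginary input and output arrays, is/os are the element strides, v is
 * the number of transforms to perform, and ivs/ovs are the strides between
 * successive transforms.  The -with-ostride 2 / -store-multiple 2 flags in
 * the genfft command above correspond to the STM2/STN2 paired stores used
 * below (interpretation based on FFTW's codelet conventions).
 */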
static void n2fv_12(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(24, is), MAKE_VOLATILE_STRIDE(24, os)) {
               V T5, Ta, TG, TF, TB, Tt, Ti, Tm, TJ, TI, TA, Tp;
               {
                    V T1, T6, T4, Tr, T9, Ts;
                    T1 = LD(&(xi[0]), ivs, &(xi[0]));
                    T6 = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                    {
                         V T2, T3, T7, T8;
                         T2 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         T3 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                         T4 = VADD(T2, T3);
                         Tr = VSUB(T3, T2);
                         T7 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         T8 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         T9 = VADD(T7, T8);
                         Ts = VSUB(T8, T7);
                    }
                    T5 = VFNMS(LDK(KP500000000), T4, T1);
                    Ta = VFNMS(LDK(KP500000000), T9, T6);
                    TG = VADD(T6, T9);
                    TF = VADD(T1, T4);
                    TB = VADD(Tr, Ts);
                    Tt = VSUB(Tr, Ts);
               }
               {
                    V Tk, Tn, Te, Tl, Th, To;
                    Tk = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                    Tn = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                    {
                         V Tc, Td, Tf, Tg;
                         Tc = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                         Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         Te = VSUB(Tc, Td);
                         Tl = VADD(Td, Tc);
                         Tf = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         Tg = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         Th = VSUB(Tf, Tg);
                         To = VADD(Tf, Tg);
                    }
                    Ti = VADD(Te, Th);
                    Tm = VFNMS(LDK(KP500000000), Tl, Tk);
                    TJ = VADD(Tn, To);
                    TI = VADD(Tk, Tl);
                    TA = VSUB(Te, Th);
                    Tp = VFNMS(LDK(KP500000000), To, Tn);
               }
               {
                    V TN, TO, TP, TQ, TT, TU;
                    {
                         V TH, TK, TL, TM;
                         TH = VSUB(TF, TG);
                         TK = VSUB(TI, TJ);
                         TN = VFNMSI(TK, TH);
                         STM2(&(xo[18]), TN, ovs, &(xo[2]));
                         TO = VFMAI(TK, TH);
                         STM2(&(xo[6]), TO, ovs, &(xo[2]));
                         TL = VADD(TF, TG);
                         TM = VADD(TI, TJ);
                         TP = VSUB(TL, TM);
                         STM2(&(xo[12]), TP, ovs, &(xo[0]));
                         TQ = VADD(TL, TM);
                         STM2(&(xo[0]), TQ, ovs, &(xo[0]));
                    }
                    {
                         V Tj, Tv, Tu, Tw, Tb, Tq, TR, TS;
                         Tb = VSUB(T5, Ta);
                         Tj = VFMA(LDK(KP866025403), Ti, Tb);
                         Tv = VFNMS(LDK(KP866025403), Ti, Tb);
                         Tq = VSUB(Tm, Tp);
                         Tu = VFNMS(LDK(KP866025403), Tt, Tq);
                         Tw = VFMA(LDK(KP866025403), Tt, Tq);
                         TR = VFNMSI(Tu, Tj);
                         STM2(&(xo[2]), TR, ovs, &(xo[2]));
                         STN2(&(xo[0]), TQ, TR, ovs);
                         TS = VFMAI(Tw, Tv);
                         STM2(&(xo[14]), TS, ovs, &(xo[2]));
                         STN2(&(xo[12]), TP, TS, ovs);
                         TT = VFMAI(Tu, Tj);
                         STM2(&(xo[22]), TT, ovs, &(xo[2]));
                         TU = VFNMSI(Tw, Tv);
                         STM2(&(xo[10]), TU, ovs, &(xo[2]));
                    }
                    {
                         V TC, TE, Tz, TD, Tx, Ty;
                         TC = VMUL(LDK(KP866025403), VSUB(TA, TB));
                         TE = VMUL(LDK(KP866025403), VADD(TB, TA));
                         Tx = VADD(T5, Ta);
                         Ty = VADD(Tm, Tp);
                         Tz = VSUB(Tx, Ty);
                         TD = VADD(Tx, Ty);
                         {
                              V TV, TW, TX, TY;
                              TV = VFMAI(TC, Tz);
                              STM2(&(xo[4]), TV, ovs, &(xo[0]));
                              STN2(&(xo[4]), TV, TO, ovs);
                              TW = VFNMSI(TE, TD);
                              STM2(&(xo[16]), TW, ovs, &(xo[0]));
                              STN2(&(xo[16]), TW, TN, ovs);
                              TX = VFNMSI(TC, Tz);
                              STM2(&(xo[20]), TX, ovs, &(xo[0]));
                              STN2(&(xo[20]), TX, TT, ovs);
                              TY = VFMAI(TE, TD);
                              STM2(&(xo[8]), TY, ovs, &(xo[0]));
                              STN2(&(xo[8]), TY, TU, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 12, XSIMD_STRING("n2fv_12"), { 30, 2, 18, 0 }, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2fv_12) (planner *p) { X(kdft_register) (p, n2fv_12, &desc);
}

#else

/* Generated by: ../../../genfft/gen_notw_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 12 -name n2fv_12 -with-ostride 2 -include dft/simd/n2f.h -store-multiple 2 */

/*
 * This function contains 48 FP additions, 8 FP multiplications,
 * (or, 44 additions, 4 multiplications, 4 fused multiply/add),
 * 33 stack variables, 2 constants, and 30 memory accesses
 */
#include "dft/simd/n2f.h"

static void n2fv_12(const R *ri, const R *ii, R *ro, R *io, stride is, stride os, INT v, INT ivs, INT ovs)
{
     DVK(KP500000000, +0.500000000000000000000000000000000000000000000);
     DVK(KP866025403, +0.866025403784438646763723170752936183471402627);
     {
          INT i;
          const R *xi;
          R *xo;
          xi = ri;
          xo = ro;
          for (i = v; i > 0; i = i - VL, xi = xi + (VL * ivs), xo = xo + (VL * ovs), MAKE_VOLATILE_STRIDE(24, is), MAKE_VOLATILE_STRIDE(24, os)) {
               V T5, Ta, TJ, Ty, Tq, Tp, Tg, Tl, TI, TA, Tz, Tu;
               {
                    V T1, T6, T4, Tw, T9, Tx;
                    T1 = LD(&(xi[0]), ivs, &(xi[0]));
                    T6 = LD(&(xi[WS(is, 6)]), ivs, &(xi[0]));
                    {
                         V T2, T3, T7, T8;
                         T2 = LD(&(xi[WS(is, 4)]), ivs, &(xi[0]));
                         T3 = LD(&(xi[WS(is, 8)]), ivs, &(xi[0]));
                         T4 = VADD(T2, T3);
                         Tw = VSUB(T3, T2);
                         T7 = LD(&(xi[WS(is, 10)]), ivs, &(xi[0]));
                         T8 = LD(&(xi[WS(is, 2)]), ivs, &(xi[0]));
                         T9 = VADD(T7, T8);
                         Tx = VSUB(T8, T7);
                    }
                    T5 = VADD(T1, T4);
                    Ta = VADD(T6, T9);
                    TJ = VADD(Tw, Tx);
                    Ty = VMUL(LDK(KP866025403), VSUB(Tw, Tx));
                    Tq = VFNMS(LDK(KP500000000), T9, T6);
                    Tp = VFNMS(LDK(KP500000000), T4, T1);
               }
               {
                    V Tc, Th, Tf, Ts, Tk, Tt;
                    Tc = LD(&(xi[WS(is, 3)]), ivs, &(xi[WS(is, 1)]));
                    Th = LD(&(xi[WS(is, 9)]), ivs, &(xi[WS(is, 1)]));
                    {
                         V Td, Te, Ti, Tj;
                         Td = LD(&(xi[WS(is, 7)]), ivs, &(xi[WS(is, 1)]));
                         Te = LD(&(xi[WS(is, 11)]), ivs, &(xi[WS(is, 1)]));
                         Tf = VADD(Td, Te);
                         Ts = VSUB(Te, Td);
                         Ti = LD(&(xi[WS(is, 1)]), ivs, &(xi[WS(is, 1)]));
                         Tj = LD(&(xi[WS(is, 5)]), ivs, &(xi[WS(is, 1)]));
                         Tk = VADD(Ti, Tj);
                         Tt = VSUB(Tj, Ti);
                    }
                    Tg = VADD(Tc, Tf);
                    Tl = VADD(Th, Tk);
                    TI = VADD(Ts, Tt);
                    TA = VFNMS(LDK(KP500000000), Tk, Th);
                    Tz = VFNMS(LDK(KP500000000), Tf, Tc);
                    Tu = VMUL(LDK(KP866025403), VSUB(Ts, Tt));
               }
               {
                    V TN, TO, TP, TQ, TR, TS;
                    {
                         V Tb, Tm, Tn, To;
                         Tb = VSUB(T5, Ta);
                         Tm = VBYI(VSUB(Tg, Tl));
                         TN = VSUB(Tb, Tm);
                         STM2(&(xo[18]), TN, ovs, &(xo[2]));
                         TO = VADD(Tb, Tm);
                         STM2(&(xo[6]), TO, ovs, &(xo[2]));
                         Tn = VADD(T5, Ta);
                         To = VADD(Tg, Tl);
                         TP = VSUB(Tn, To);
                         STM2(&(xo[12]), TP, ovs, &(xo[0]));
                         TQ = VADD(Tn, To);
                         STM2(&(xo[0]), TQ, ovs, &(xo[0]));
                    }
                    {
                         V Tv, TE, TC, TD, Tr, TB, TT, TU;
                         Tr = VSUB(Tp, Tq);
                         Tv = VSUB(Tr, Tu);
                         TE = VADD(Tr, Tu);
                         TB = VSUB(Tz, TA);
                         TC = VBYI(VADD(Ty, TB));
                         TD = VBYI(VSUB(Ty, TB));
                         TR = VSUB(Tv, TC);
                         STM2(&(xo[10]), TR, ovs, &(xo[2]));
                         TS = VSUB(TE, TD);
                         STM2(&(xo[22]), TS, ovs, &(xo[2]));
                         TT = VADD(TC, Tv);
                         STM2(&(xo[14]), TT, ovs, &(xo[2]));
                         STN2(&(xo[12]), TP, TT, ovs);
                         TU = VADD(TD, TE);
                         STM2(&(xo[2]), TU, ovs, &(xo[2]));
                         STN2(&(xo[0]), TQ, TU, ovs);
                    }
                    {
                         V TK, TM, TH, TL, TF, TG;
                         TK = VBYI(VMUL(LDK(KP866025403), VSUB(TI, TJ)));
                         TM = VBYI(VMUL(LDK(KP866025403), VADD(TJ, TI)));
                         TF = VADD(Tp, Tq);
                         TG = VADD(Tz, TA);
                         TH = VSUB(TF, TG);
                         TL = VADD(TF, TG);
                         {
                              V TV, TW, TX, TY;
                              TV = VSUB(TH, TK);
                              STM2(&(xo[20]), TV, ovs, &(xo[0]));
                              STN2(&(xo[20]), TV, TS, ovs);
                              TW = VADD(TL, TM);
                              STM2(&(xo[8]), TW, ovs, &(xo[0]));
                              STN2(&(xo[8]), TW, TR, ovs);
                              TX = VADD(TH, TK);
                              STM2(&(xo[4]), TX, ovs, &(xo[0]));
                              STN2(&(xo[4]), TX, TO, ovs);
                              TY = VSUB(TL, TM);
                              STM2(&(xo[16]), TY, ovs, &(xo[0]));
                              STN2(&(xo[16]), TY, TN, ovs);
                         }
                    }
               }
          }
     }
     VLEAVE();
}

static const kdft_desc desc = { 12, XSIMD_STRING("n2fv_12"), { 44, 4, 4, 0 }, &GENUS, 0, 2, 0, 0 };

void XSIMD(codelet_n2fv_12) (planner *p) { X(kdft_register) (p, n2fv_12, &desc);
}

#endif