/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu Dec 10 07:05:09 EST 2020 */

#include "dft/codelet-dft.h"

#if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)

/* Generated by: ../../../genfft/gen_twiddle_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name t1fv_8 -include dft/simd/t1f.h */

/*
 * This function contains 33 FP additions, 24 FP multiplications,
 * (or, 23 additions, 14 multiplications, 10 fused multiply/add),
 * 24 stack variables, 1 constant, and 16 memory accesses
 */
#include "dft/simd/t1f.h"
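
/*
 * What the generated loop below computes, in effect: one radix-8
 * decimation-in-time twiddle butterfly per vector of VL complex
 * elements, in place.  Each input x[WS(rs, k)] for k = 1..7 is first
 * scaled by its twiddle factor via BYTWJ, then combined in three
 * add/subtract stages.  The single constant KP707106781 is
 * cos(pi/4) = 1/sqrt(2), the real part of the primitive 8th root of
 * unity exp(-2*pi*i/8); the rotations by +/-i need no constant at all.
 */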

static void t1fv_8(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
	  INT m;
	  R *x;
	  x = ri;
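	  /* mb and me bound the range of butterflies handled by this call.
	     Each of the 7 nontrivial twiddle factors W^1..W^7 presumably
	     occupies two vector slots (real and imaginary parts), which
	     would account for the W stride of TWVL * 14 per iteration. */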
	  for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(8, rs)) {
	       V T4, Tq, Tl, Tr, T9, Tt, Te, Tu, T1, T3, T2;
	       T1 = LD(&(x[0]), ms, &(x[0]));
	       T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
	       T3 = BYTWJ(&(W[TWVL * 6]), T2);
	       T4 = VSUB(T1, T3);
	       Tq = VADD(T1, T3);
	       {
		    V Ti, Tk, Th, Tj;
		    Th = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
		    Ti = BYTWJ(&(W[TWVL * 2]), Th);
		    Tj = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
		    Tk = BYTWJ(&(W[TWVL * 10]), Tj);
		    Tl = VSUB(Ti, Tk);
		    Tr = VADD(Ti, Tk);
	       }
	       {
		    V T6, T8, T5, T7;
		    T5 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
		    T6 = BYTWJ(&(W[0]), T5);
		    T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
		    T8 = BYTWJ(&(W[TWVL * 8]), T7);
		    T9 = VSUB(T6, T8);
		    Tt = VADD(T6, T8);
	       }
	       {
		    V Tb, Td, Ta, Tc;
		    Ta = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
		    Tb = BYTWJ(&(W[TWVL * 12]), Ta);
		    Tc = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
		    Td = BYTWJ(&(W[TWVL * 4]), Tc);
		    Te = VSUB(Tb, Td);
		    Tu = VADD(Tb, Td);
	       }
	       {
		    V Ts, Tv, Tw, Tx;
		    Ts = VADD(Tq, Tr);
		    Tv = VADD(Tt, Tu);
		    ST(&(x[WS(rs, 4)]), VSUB(Ts, Tv), ms, &(x[0]));
		    ST(&(x[0]), VADD(Ts, Tv), ms, &(x[0]));
		    Tw = VSUB(Tq, Tr);
		    Tx = VSUB(Tu, Tt);
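		    /* Outputs 2 and 6 are Tw + i*Tx and Tw - i*Tx: VFMAI(b, a)
		       and VFNMSI(b, a) compute, in effect, a + i*b and
		       a - i*b, so the rotation by +/-i costs no multiply. */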
		    ST(&(x[WS(rs, 6)]), VFNMSI(Tx, Tw), ms, &(x[0]));
		    ST(&(x[WS(rs, 2)]), VFMAI(Tx, Tw), ms, &(x[0]));
		    {
			 V Tg, To, Tn, Tp, Tf, Tm;
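			 /* Odd-index outputs: VFMA(a, b, c) = a*b + c and
			    VFNMS(a, b, c) = c - a*b fold the 1/sqrt(2)
			    scaling of the diagonal terms into the adds. */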
			 Tf = VADD(T9, Te);
			 Tg = VFMA(LDK(KP707106781), Tf, T4);
			 To = VFNMS(LDK(KP707106781), Tf, T4);
			 Tm = VSUB(Te, T9);
			 Tn = VFNMS(LDK(KP707106781), Tm, Tl);
			 Tp = VFMA(LDK(KP707106781), Tm, Tl);
			 ST(&(x[WS(rs, 1)]), VFNMSI(Tn, Tg), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 3)]), VFMAI(Tp, To), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 7)]), VFMAI(Tn, Tg), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 5)]), VFNMSI(Tp, To), ms, &(x[WS(rs, 1)]));
		    }
	       }
	  }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     { TW_NEXT, VL, 0 }
};
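
/*
 * The table above asks the planner for the seven nontrivial twiddle
 * factors W^1..W^7 of the size-8 butterfly (W^0 = 1 is implicit);
 * TW_NEXT advances to the next batch of VL butterflies, consistent with
 * the W stride of TWVL * 14 in the loop.
 */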

static const ct_desc desc = { 8, XSIMD_STRING("t1fv_8"), twinstr, &GENUS, { 23, 14, 10, 0 }, 0, 0, 0 };
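
/* The { 23, 14, 10, 0 } operation count matches the header comment of
   this branch: 23 additions, 14 multiplications, 10 fused multiply/adds.
   The planner uses it to cost this codelet against alternatives. */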

void XSIMD(codelet_t1fv_8) (planner *p) {
     X(kdft_dit_register) (p, t1fv_8, &desc);
}
#else

/* Generated by: ../../../genfft/gen_twiddle_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name t1fv_8 -include dft/simd/t1f.h */

/*
 * This function contains 33 FP additions, 16 FP multiplications,
 * (or, 33 additions, 16 multiplications, 0 fused multiply/add),
 * 24 stack variables, 1 constant, and 16 memory accesses
 */
#include "dft/simd/t1f.h"
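
/*
 * Non-FMA variant of the same radix-8 DIT butterfly: identical data
 * flow, but the 1/sqrt(2) scaling is done with explicit VMUL and the
 * rotations by i with VBYI, which multiplies a vector of complex
 * elements by i.
 */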

static void t1fv_8(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
	  INT m;
	  R *x;
	  x = ri;
	  for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(8, rs)) {
	       V T4, Tq, Tm, Tr, T9, Tt, Te, Tu, T1, T3, T2;
	       T1 = LD(&(x[0]), ms, &(x[0]));
	       T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
	       T3 = BYTWJ(&(W[TWVL * 6]), T2);
	       T4 = VSUB(T1, T3);
	       Tq = VADD(T1, T3);
	       {
		    V Tj, Tl, Ti, Tk;
		    Ti = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
		    Tj = BYTWJ(&(W[TWVL * 2]), Ti);
		    Tk = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
		    Tl = BYTWJ(&(W[TWVL * 10]), Tk);
		    Tm = VSUB(Tj, Tl);
		    Tr = VADD(Tj, Tl);
	       }
	       {
		    V T6, T8, T5, T7;
		    T5 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
		    T6 = BYTWJ(&(W[0]), T5);
		    T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
		    T8 = BYTWJ(&(W[TWVL * 8]), T7);
		    T9 = VSUB(T6, T8);
		    Tt = VADD(T6, T8);
	       }
	       {
		    V Tb, Td, Ta, Tc;
		    Ta = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
		    Tb = BYTWJ(&(W[TWVL * 12]), Ta);
		    Tc = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
		    Td = BYTWJ(&(W[TWVL * 4]), Tc);
		    Te = VSUB(Tb, Td);
		    Tu = VADD(Tb, Td);
	       }
	       {
		    V Ts, Tv, Tw, Tx;
		    Ts = VADD(Tq, Tr);
		    Tv = VADD(Tt, Tu);
		    ST(&(x[WS(rs, 4)]), VSUB(Ts, Tv), ms, &(x[0]));
		    ST(&(x[0]), VADD(Ts, Tv), ms, &(x[0]));
		    Tw = VSUB(Tq, Tr);
		    Tx = VBYI(VSUB(Tu, Tt));
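		    /* Tx is already i*(Tu - Tt), so outputs 2 and 6 are plain
		       sums and differences here, unlike the FMA branch. */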
		    ST(&(x[WS(rs, 6)]), VSUB(Tw, Tx), ms, &(x[0]));
		    ST(&(x[WS(rs, 2)]), VADD(Tw, Tx), ms, &(x[0]));
		    {
			 V Tg, To, Tn, Tp, Tf, Th;
			 Tf = VMUL(LDK(KP707106781), VADD(T9, Te));
			 Tg = VADD(T4, Tf);
			 To = VSUB(T4, Tf);
			 Th = VMUL(LDK(KP707106781), VSUB(Te, T9));
			 Tn = VBYI(VSUB(Th, Tm));
			 Tp = VBYI(VADD(Tm, Th));
			 ST(&(x[WS(rs, 7)]), VSUB(Tg, Tn), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 3)]), VADD(To, Tp), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 1)]), VADD(Tg, Tn), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 5)]), VSUB(To, Tp), ms, &(x[WS(rs, 1)]));
		    }
	       }
	  }
     }
     VLEAVE();
}

static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     { TW_NEXT, VL, 0 }
};

static const ct_desc desc = { 8, XSIMD_STRING("t1fv_8"), twinstr, &GENUS, { 33, 16, 0, 0 }, 0, 0, 0 };
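
/* Same twiddle table as the FMA branch; only the operation count differs:
   { 33, 16, 0, 0 } records 33 additions and 16 multiplications with no
   fused multiply/adds, matching the header comment of this branch. */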

void XSIMD(codelet_t1fv_8) (planner *p) {
     X(kdft_dit_register) (p, t1fv_8, &desc);
}
#endif