/*
 * Copyright (c) 2003, 2007-14 Matteo Frigo
 * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
20 
/* This file was automatically generated --- DO NOT EDIT */
/* Generated on Thu Dec 10 07:05:32 EST 2020 */
23 
24 #include "dft/codelet-dft.h"
25 
26 #if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA)
27 
/* Generated by: ../../../genfft/gen_twiddle_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name t2bv_8 -include dft/simd/t2b.h -sign 1 */
29 
/*
 * This function contains 33 FP additions, 24 FP multiplications,
 * (or, 23 additions, 14 multiplications, 10 fused multiply/add),
 * 24 stack variables, 1 constants, and 16 memory accesses
 */
35 #include "dft/simd/t2b.h"
36 
/*
 * SIMD radix-8 DIT twiddle codelet, FMA variant (machine-generated by
 * genfft -- see the command line recorded in the comment above; do not
 * hand-edit).  For each transform strand in [mb, me) it loads the 8
 * complex points x[0], x[WS(rs,1)], ..., x[WS(rs,7)], multiplies points
 * 1..7 by the precomputed twiddle factors in W (BYTW), computes an
 * 8-point DFT (sign +1 per the generator flags), and stores the 8
 * results back in place.
 *
 *   ri, ii  real/imag base pointers; only ii is referenced in this
 *           body (the t2b genus addresses the complex data through ii
 *           -- see dft/simd/t2b.h).
 *   W       twiddle-factor array; advances by TWVL*14 per iteration
 *           (7 twiddles at TWVL*2 apiece, indices 0, 2, ..., 12).
 *   rs      stride between the 8 DFT points of one strand.
 *   mb, me  half-open range of strand indices to process.
 *   ms      stride between consecutive strands.
 */
static void t2bv_8(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
     /* 0.70710678... = 1/sqrt(2), the only constant a radix-8 DFT needs. */
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
	  INT m;
	  R *x;
	  x = ii;
	  /* Each pass handles VL strands at once; W is laid out per strand,
	     hence the initial mb * (TWVL/VL) * 14 offset. */
	  for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(8, rs)) {
	       V T4, Tq, Tl, Tr, T9, Tt, Te, Tu, T1, T3, T2;
	       /* Radix-2 butterfly of points 0 and 4 (point 4 twiddled;
	          point 0 is the only untwiddled input). */
	       T1 = LD(&(x[0]), ms, &(x[0]));
	       T2 = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
	       T3 = BYTW(&(W[TWVL * 6]), T2);
	       T4 = VSUB(T1, T3);
	       Tq = VADD(T1, T3);
	       {
		    V Ti, Tk, Th, Tj;
		    /* Butterfly of twiddled points 2 and 6. */
		    Th = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
		    Ti = BYTW(&(W[TWVL * 2]), Th);
		    Tj = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
		    Tk = BYTW(&(W[TWVL * 10]), Tj);
		    Tl = VSUB(Ti, Tk);
		    Tr = VADD(Ti, Tk);
	       }
	       {
		    V T6, T8, T5, T7;
		    /* Butterfly of twiddled points 1 and 5. */
		    T5 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
		    T6 = BYTW(&(W[0]), T5);
		    T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
		    T8 = BYTW(&(W[TWVL * 8]), T7);
		    T9 = VSUB(T6, T8);
		    Tt = VADD(T6, T8);
	       }
	       {
		    V Tb, Td, Ta, Tc;
		    /* Butterfly of twiddled points 7 and 3. */
		    Ta = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
		    Tb = BYTW(&(W[TWVL * 12]), Ta);
		    Tc = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
		    Td = BYTW(&(W[TWVL * 4]), Tc);
		    Te = VSUB(Tb, Td);
		    Tu = VADD(Tb, Td);
	       }
	       {
		    V Ts, Tv, Tw, Tx;
		    /* Even outputs 0/2/4/6 from the butterfly sums; the
		       VFMAI/VFNMSI forms combine Ts with +/- i*Tv. */
		    Ts = VSUB(Tq, Tr);
		    Tv = VSUB(Tt, Tu);
		    ST(&(x[WS(rs, 6)]), VFNMSI(Tv, Ts), ms, &(x[0]));
		    ST(&(x[WS(rs, 2)]), VFMAI(Tv, Ts), ms, &(x[0]));
		    Tw = VADD(Tq, Tr);
		    Tx = VADD(Tt, Tu);
		    ST(&(x[WS(rs, 4)]), VSUB(Tw, Tx), ms, &(x[0]));
		    ST(&(x[0]), VADD(Tw, Tx), ms, &(x[0]));
		    {
			 V Tg, To, Tn, Tp, Tf, Tm;
			 /* Odd outputs 1/3/5/7: the butterfly differences
			    scaled by 1/sqrt(2), expressed as fused
			    multiply-adds (this is what distinguishes the
			    FMA variant from the one below). */
			 Tf = VADD(T9, Te);
			 Tg = VFNMS(LDK(KP707106781), Tf, T4);
			 To = VFMA(LDK(KP707106781), Tf, T4);
			 Tm = VSUB(T9, Te);
			 Tn = VFNMS(LDK(KP707106781), Tm, Tl);
			 Tp = VFMA(LDK(KP707106781), Tm, Tl);
			 ST(&(x[WS(rs, 3)]), VFNMSI(Tn, Tg), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 7)]), VFNMSI(Tp, To), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 5)]), VFMAI(Tn, Tg), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 1)]), VFMAI(Tp, To), ms, &(x[WS(rs, 1)]));
		    }
	       }
	  }
     }
     VLEAVE();
}
106 
/*
 * Twiddle-factor layout: one full twiddle (VTW) for each of the seven
 * non-trivial input points 1..7 of the radix-8 DFT (point 0 needs none),
 * terminated by TW_NEXT, which advances to the next vector of VL strands.
 * Entry order here fixes the W[] offsets used by t2bv_8 above.
 */
static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     { TW_NEXT, VL, 0 }
};
117 
/* Codelet descriptor: radix 8, twiddle layout above; the {23, 14, 10, 0}
   opcount matches the adds/muls/fmas tally in the generator comment. */
static const ct_desc desc = { 8, XSIMD_STRING("t2bv_8"), twinstr, &GENUS, { 23, 14, 10, 0 }, 0, 0, 0 };
119 
/* Register this codelet with the planner as a decimation-in-time
   twiddle DFT kernel. */
void XSIMD(codelet_t2bv_8) (planner *p) {
     X(kdft_dit_register) (p, t2bv_8, &desc);
}
123 #else
124 
/* Generated by: ../../../genfft/gen_twiddle_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 8 -name t2bv_8 -include dft/simd/t2b.h -sign 1 */
126 
/*
 * This function contains 33 FP additions, 16 FP multiplications,
 * (or, 33 additions, 16 multiplications, 0 fused multiply/add),
 * 24 stack variables, 1 constants, and 16 memory accesses
 */
132 #include "dft/simd/t2b.h"
133 
/*
 * SIMD radix-8 DIT twiddle codelet, non-FMA variant (machine-generated
 * by genfft -- see the command line recorded in the comment above; do
 * not hand-edit).  Identical contract to the FMA variant: for each
 * strand in [mb, me) it loads 8 complex points at stride rs, applies
 * the twiddles in W to points 1..7 (BYTW), computes an 8-point DFT
 * (sign +1), and stores the results in place.  This version expresses
 * the 1/sqrt(2) rotations with separate VMUL/VBYI operations instead
 * of fused multiply-adds.
 *
 *   ri, ii  real/imag base pointers; only ii is referenced in this
 *           body (the t2b genus addresses the data through ii -- see
 *           dft/simd/t2b.h).
 *   W       twiddle-factor array; advances by TWVL*14 per iteration.
 *   rs      stride between the 8 DFT points of one strand.
 *   mb, me  half-open range of strand indices to process.
 *   ms      stride between consecutive strands.
 */
static void t2bv_8(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms)
{
     /* 0.70710678... = 1/sqrt(2), the only constant a radix-8 DFT needs. */
     DVK(KP707106781, +0.707106781186547524400844362104849039284835938);
     {
	  INT m;
	  R *x;
	  x = ii;
	  /* Each pass handles VL strands at once; W is laid out per strand,
	     hence the initial mb * (TWVL/VL) * 14 offset. */
	  for (m = mb, W = W + (mb * ((TWVL / VL) * 14)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 14), MAKE_VOLATILE_STRIDE(8, rs)) {
	       V Tl, Tq, Tg, Tr, T5, Tt, Ta, Tu, Ti, Tk, Tj;
	       /* Radix-2 butterfly of points 0 and 4 (point 4 twiddled;
	          point 0 is the only untwiddled input). */
	       Ti = LD(&(x[0]), ms, &(x[0]));
	       Tj = LD(&(x[WS(rs, 4)]), ms, &(x[0]));
	       Tk = BYTW(&(W[TWVL * 6]), Tj);
	       Tl = VSUB(Ti, Tk);
	       Tq = VADD(Ti, Tk);
	       {
		    V Td, Tf, Tc, Te;
		    /* Butterfly of twiddled points 2 and 6. */
		    Tc = LD(&(x[WS(rs, 2)]), ms, &(x[0]));
		    Td = BYTW(&(W[TWVL * 2]), Tc);
		    Te = LD(&(x[WS(rs, 6)]), ms, &(x[0]));
		    Tf = BYTW(&(W[TWVL * 10]), Te);
		    Tg = VSUB(Td, Tf);
		    Tr = VADD(Td, Tf);
	       }
	       {
		    V T2, T4, T1, T3;
		    /* Butterfly of twiddled points 1 and 5. */
		    T1 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)]));
		    T2 = BYTW(&(W[0]), T1);
		    T3 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)]));
		    T4 = BYTW(&(W[TWVL * 8]), T3);
		    T5 = VSUB(T2, T4);
		    Tt = VADD(T2, T4);
	       }
	       {
		    V T7, T9, T6, T8;
		    /* Butterfly of twiddled points 7 and 3. */
		    T6 = LD(&(x[WS(rs, 7)]), ms, &(x[WS(rs, 1)]));
		    T7 = BYTW(&(W[TWVL * 12]), T6);
		    T8 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)]));
		    T9 = BYTW(&(W[TWVL * 4]), T8);
		    Ta = VSUB(T7, T9);
		    Tu = VADD(T7, T9);
	       }
	       {
		    V Ts, Tv, Tw, Tx;
		    /* Even outputs 0/2/4/6 from the butterfly sums; VBYI
		       supplies the explicit multiply-by-i here. */
		    Ts = VSUB(Tq, Tr);
		    Tv = VBYI(VSUB(Tt, Tu));
		    ST(&(x[WS(rs, 6)]), VSUB(Ts, Tv), ms, &(x[0]));
		    ST(&(x[WS(rs, 2)]), VADD(Ts, Tv), ms, &(x[0]));
		    Tw = VADD(Tq, Tr);
		    Tx = VADD(Tt, Tu);
		    ST(&(x[WS(rs, 4)]), VSUB(Tw, Tx), ms, &(x[0]));
		    ST(&(x[0]), VADD(Tw, Tx), ms, &(x[0]));
		    {
			 V Th, To, Tn, Tp, Tb, Tm;
			 /* Odd outputs 1/3/5/7: butterfly differences
			    scaled by 1/sqrt(2) with plain VMUL (no FMA). */
			 Tb = VMUL(LDK(KP707106781), VSUB(T5, Ta));
			 Th = VBYI(VSUB(Tb, Tg));
			 To = VBYI(VADD(Tg, Tb));
			 Tm = VMUL(LDK(KP707106781), VADD(T5, Ta));
			 Tn = VSUB(Tl, Tm);
			 Tp = VADD(Tl, Tm);
			 ST(&(x[WS(rs, 3)]), VADD(Th, Tn), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 7)]), VSUB(Tp, To), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 5)]), VSUB(Tn, Th), ms, &(x[WS(rs, 1)]));
			 ST(&(x[WS(rs, 1)]), VADD(To, Tp), ms, &(x[WS(rs, 1)]));
		    }
	       }
	  }
     }
     VLEAVE();
}
203 
/*
 * Twiddle-factor layout: one full twiddle (VTW) for each of the seven
 * non-trivial input points 1..7 of the radix-8 DFT (point 0 needs none),
 * terminated by TW_NEXT, which advances to the next vector of VL strands.
 * Entry order here fixes the W[] offsets used by t2bv_8 above.
 */
static const tw_instr twinstr[] = {
     VTW(0, 1),
     VTW(0, 2),
     VTW(0, 3),
     VTW(0, 4),
     VTW(0, 5),
     VTW(0, 6),
     VTW(0, 7),
     { TW_NEXT, VL, 0 }
};
214 
/* Codelet descriptor: radix 8, twiddle layout above; the {33, 16, 0, 0}
   opcount matches the adds/muls/fmas tally in the generator comment. */
static const ct_desc desc = { 8, XSIMD_STRING("t2bv_8"), twinstr, &GENUS, { 33, 16, 0, 0 }, 0, 0, 0 };
216 
/* Register this codelet with the planner as a decimation-in-time
   twiddle DFT kernel. */
void XSIMD(codelet_t2bv_8) (planner *p) {
     X(kdft_dit_register) (p, t2bv_8, &desc);
}
220 #endif
221