1 /*********************************************************************/
2 /* Copyright 2009, 2010 The University of Texas at Austin.           */
3 /* All rights reserved.                                              */
4 /*                                                                   */
5 /* Redistribution and use in source and binary forms, with or        */
6 /* without modification, are permitted provided that the following   */
7 /* conditions are met:                                               */
8 /*                                                                   */
9 /*   1. Redistributions of source code must retain the above         */
10 /*      copyright notice, this list of conditions and the following  */
11 /*      disclaimer.                                                  */
12 /*                                                                   */
13 /*   2. Redistributions in binary form must reproduce the above      */
14 /*      copyright notice, this list of conditions and the following  */
15 /*      disclaimer in the documentation and/or other materials       */
16 /*      provided with the distribution.                              */
17 /*                                                                   */
18 /*    THIS  SOFTWARE IS PROVIDED  BY THE  UNIVERSITY OF  TEXAS AT    */
19 /*    AUSTIN  ``AS IS''  AND ANY  EXPRESS OR  IMPLIED WARRANTIES,    */
20 /*    INCLUDING, BUT  NOT LIMITED  TO, THE IMPLIED  WARRANTIES OF    */
21 /*    MERCHANTABILITY  AND FITNESS FOR  A PARTICULAR  PURPOSE ARE    */
22 /*    DISCLAIMED.  IN  NO EVENT SHALL THE UNIVERSITY  OF TEXAS AT    */
23 /*    AUSTIN OR CONTRIBUTORS BE  LIABLE FOR ANY DIRECT, INDIRECT,    */
24 /*    INCIDENTAL,  SPECIAL, EXEMPLARY,  OR  CONSEQUENTIAL DAMAGES    */
25 /*    (INCLUDING, BUT  NOT LIMITED TO,  PROCUREMENT OF SUBSTITUTE    */
26 /*    GOODS  OR  SERVICES; LOSS  OF  USE,  DATA,  OR PROFITS;  OR    */
27 /*    BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF    */
28 /*    LIABILITY, WHETHER  IN CONTRACT, STRICT  LIABILITY, OR TORT    */
29 /*    (INCLUDING NEGLIGENCE OR OTHERWISE)  ARISING IN ANY WAY OUT    */
30 /*    OF  THE  USE OF  THIS  SOFTWARE,  EVEN  IF ADVISED  OF  THE    */
31 /*    POSSIBILITY OF SUCH DAMAGE.                                    */
32 /*                                                                   */
33 /* The views and conclusions contained in the software and           */
34 /* documentation are those of the authors and should not be          */
35 /* interpreted as representing official policies, either expressed   */
36 /* or implied, of The University of Texas at Austin.                 */
37 /*********************************************************************/
38 
39 #include "common.h"
40 #include <altivec.h>
41 
/* Scalar -1.0, presumably passed as alpha to GEMM_KERNEL so the
   already-solved panels are *subtracted* from the remaining blocks
   during the blocked triangular solve -- confirm against the (not
   shown) driver loop further down this file. */
static FLOAT dm1 = -1.;

/* For the conjugated variant the "L" flavour of the GEMM kernel is
   used; otherwise the plain "N" flavour. */
#ifdef CONJ
#define GEMM_KERNEL   GEMM_KERNEL_L
#else
#define GEMM_KERNEL   GEMM_KERNEL_N
#endif

/* Map the configured M unrolling factor to its base-2 shift so the
   driver can use shifts instead of multiplies/divides by the unroll. */
#if GEMM_DEFAULT_UNROLL_M == 1
#define GEMM_UNROLL_M_SHIFT 0
#endif

#if GEMM_DEFAULT_UNROLL_M == 2
#define GEMM_UNROLL_M_SHIFT 1
#endif

#if GEMM_DEFAULT_UNROLL_M == 4
#define GEMM_UNROLL_M_SHIFT 2
#endif

/* NOTE(review): 6 is not a power of two; shift 2 (== 4) is used as the
   nearest lower power -- this mirrors the convention used elsewhere in
   the tree, but confirm the driver handles the remainder correctly. */
#if GEMM_DEFAULT_UNROLL_M == 6
#define GEMM_UNROLL_M_SHIFT 2
#endif

#if GEMM_DEFAULT_UNROLL_M == 8
#define GEMM_UNROLL_M_SHIFT 3
#endif

#if GEMM_DEFAULT_UNROLL_M == 16
#define GEMM_UNROLL_M_SHIFT 4
#endif

/* Same mapping for the N unrolling factor. */
#if GEMM_DEFAULT_UNROLL_N == 1
#define GEMM_UNROLL_N_SHIFT 0
#endif

#if GEMM_DEFAULT_UNROLL_N == 2
#define GEMM_UNROLL_N_SHIFT 1
#endif

#if GEMM_DEFAULT_UNROLL_N == 4
#define GEMM_UNROLL_N_SHIFT 2
#endif

#if GEMM_DEFAULT_UNROLL_N == 8
#define GEMM_UNROLL_N_SHIFT 3
#endif

#if GEMM_DEFAULT_UNROLL_N == 16
#define GEMM_UNROLL_N_SHIFT 4
#endif
93 
94 #ifndef COMPLEX
95 
96 #ifdef DOUBLE
97 
/*
 * Forward-substitution solve of an 8x8 lower-triangular system against
 * eight right-hand-side columns of C, vectorized with AltiVec/VSX.
 *
 *   a   - packed 8x8 triangular factor, stride 8 per column; the diagonal
 *         entries a[9*k] hold *pre-inverted* pivots, so the "division" by
 *         the pivot below is expressed as a multiplication.
 *   b   - packed 8x8 output buffer receiving the solved values row by row
 *         (8 values per solved row), in the layout consumed by the
 *         subsequent GEMM update.
 *   c   - the 8x8 block of the result matrix, solved in place.
 *   ldc - leading dimension (column stride) of C.
 *
 * FLOAT is double in this branch, so each AltiVec vector holds 2 elements
 * and Vc0[i] aliases c0[2*i] / c0[2*i+1].  After solving row k, row k is
 * eliminated from the rows below it: rows that fill whole vectors are
 * updated with vec_nmsub (fused negative multiply-subtract), while a row
 * that shares a vector with the just-solved row is updated with a scalar
 * statement instead.
 *
 * NOTE(review): the vector reinterpretation of a, b and the C columns
 * assumes all of them are 16-byte aligned (ldc even, aligned base
 * pointers) -- confirm against the caller's buffer setup.
 */
static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
   FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
   /* One pointer per right-hand-side column of C. */
   c0 = &c[0*ldc];
   c1 = &c[1*ldc];
   c2 = &c[2*ldc];
   c3 = &c[3*ldc];
   c4 = &c[4*ldc];
   c5 = &c[5*ldc];
   c6 = &c[6*ldc];
   c7 = &c[7*ldc];
   /* Vector (2 x double) views of the packed factor, the output buffer
      and the eight C columns. */
   vector FLOAT *Va = (vector FLOAT *) a;
   vector FLOAT *Vb = (vector FLOAT *) b;
   vector FLOAT *Vc0 = (vector FLOAT *) c0;
   vector FLOAT *Vc1 = (vector FLOAT *) c1;
   vector FLOAT *Vc2 = (vector FLOAT *) c2;
   vector FLOAT *Vc3 = (vector FLOAT *) c3;
   vector FLOAT *Vc4 = (vector FLOAT *) c4;
   vector FLOAT *Vc5 = (vector FLOAT *) c5;
   vector FLOAT *Vc6 = (vector FLOAT *) c6;
   vector FLOAT *Vc7 = (vector FLOAT *) c7;
   /* Broadcast registers: each holds one solved value splatted across
      both vector lanes. */
   vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;

   /* ---- row 0: multiply by inverted pivot a[0], store solved row to b. */
   b[0] = (c0[0] *= a[0]);
   b[1] = (c1[0] *= a[0]);
   b[2] = (c2[0] *= a[0]);
   b[3] = (c3[0] *= a[0]);
   b[4] = (c4[0] *= a[0]);
   b[5] = (c5[0] *= a[0]);
   b[6] = (c6[0] *= a[0]);
   b[7] = (c7[0] *= a[0]);
   /* Broadcast each column's solved row-0 value. */
   VbS0 = vec_splat(Vb[0], 0);
   VbS1 = vec_splat(Vb[0], 1);
   VbS2 = vec_splat(Vb[1], 0);
   VbS3 = vec_splat(Vb[1], 1);
   VbS4 = vec_splat(Vb[2], 0);
   VbS5 = vec_splat(Vb[2], 1);
   VbS6 = vec_splat(Vb[3], 0);
   VbS7 = vec_splat(Vb[3], 1);
   /* Eliminate row 0 from rows 2..7 (Vc[1..3]; Va[1..3] = a[2..7]). */
   Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]);
   Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]);
   Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]);
   Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]);
   Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]);
   Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]);
   Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]);
   Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]);
   Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]);
   Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]);
   Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]);
   Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]);
   Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]);
   Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]);
   Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]);
   Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]);
   Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]);
   Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]);
   Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]);
   Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]);
   Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]);
   Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]);
   Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]);
   Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]);
   /* Row 1 shares a vector with row 0, so eliminate it scalar-wise. */
   c0[1] -= c0[0] * a[1];
   c1[1] -= c1[0] * a[1];
   c2[1] -= c2[0] * a[1];
   c3[1] -= c3[0] * a[1];
   c4[1] -= c4[0] * a[1];
   c5[1] -= c5[0] * a[1];
   c6[1] -= c6[0] * a[1];
   c7[1] -= c7[0] * a[1];

   /* ---- row 1: pivot at a[9] (column 1 of A starts at a[8]). */
   b[ 8] = (c0[1] *= a[9]);
   b[ 9] = (c1[1] *= a[9]);
   b[10] = (c2[1] *= a[9]);
   b[11] = (c3[1] *= a[9]);
   b[12] = (c4[1] *= a[9]);
   b[13] = (c5[1] *= a[9]);
   b[14] = (c6[1] *= a[9]);
   b[15] = (c7[1] *= a[9]);
   VbS0 = vec_splat(Vb[4], 0);
   VbS1 = vec_splat(Vb[4], 1);
   VbS2 = vec_splat(Vb[5], 0);
   VbS3 = vec_splat(Vb[5], 1);
   VbS4 = vec_splat(Vb[6], 0);
   VbS5 = vec_splat(Vb[6], 1);
   VbS6 = vec_splat(Vb[7], 0);
   VbS7 = vec_splat(Vb[7], 1);
   /* Rows 2..7 start on a vector boundary, so the whole elimination is
      vector work here (Va[5..7] = a[10..15], rows 2..7 of column 1). */
   Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]);
   Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]);
   Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]);
   Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]);
   Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]);
   Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]);
   Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]);
   Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]);
   Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]);
   Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]);
   Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]);
   Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]);
   Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]);
   Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]);
   Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]);
   Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]);
   Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]);
   Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]);
   Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]);
   Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]);
   Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]);
   Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]);
   Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]);
   Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]);

   /* ---- row 2: pivot at a[18]. */
   b[16] = (c0[2] *= a[18]);
   b[17] = (c1[2] *= a[18]);
   b[18] = (c2[2] *= a[18]);
   b[19] = (c3[2] *= a[18]);
   b[20] = (c4[2] *= a[18]);
   b[21] = (c5[2] *= a[18]);
   b[22] = (c6[2] *= a[18]);
   b[23] = (c7[2] *= a[18]);
   VbS0 = vec_splat(Vb[ 8], 0);
   VbS1 = vec_splat(Vb[ 8], 1);
   VbS2 = vec_splat(Vb[ 9], 0);
   VbS3 = vec_splat(Vb[ 9], 1);
   VbS4 = vec_splat(Vb[10], 0);
   VbS5 = vec_splat(Vb[10], 1);
   VbS6 = vec_splat(Vb[11], 0);
   VbS7 = vec_splat(Vb[11], 1);
   /* Vector-eliminate rows 4..7 (Va[10..11] = a[20..23]) ... */
   Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]);
   Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]);
   Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]);
   Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]);
   Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]);
   Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]);
   Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]);
   Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]);
   Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]);
   Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]);
   Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]);
   Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]);
   Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]);
   Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]);
   Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]);
   Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]);
   /* ... and row 3 (same vector as row 2) scalar-wise with a[19]. */
   c0[3] -= c0[2] * a[19];
   c1[3] -= c1[2] * a[19];
   c2[3] -= c2[2] * a[19];
   c3[3] -= c3[2] * a[19];
   c4[3] -= c4[2] * a[19];
   c5[3] -= c5[2] * a[19];
   c6[3] -= c6[2] * a[19];
   c7[3] -= c7[2] * a[19];

   /* ---- row 3: pivot at a[27]; rows 4..7 eliminated vectorially
      (Va[14..15] = a[28..31]). */
   b[24] = (c0[3] *= a[27]);
   b[25] = (c1[3] *= a[27]);
   b[26] = (c2[3] *= a[27]);
   b[27] = (c3[3] *= a[27]);
   b[28] = (c4[3] *= a[27]);
   b[29] = (c5[3] *= a[27]);
   b[30] = (c6[3] *= a[27]);
   b[31] = (c7[3] *= a[27]);
   VbS0 = vec_splat(Vb[12], 0);
   VbS1 = vec_splat(Vb[12], 1);
   VbS2 = vec_splat(Vb[13], 0);
   VbS3 = vec_splat(Vb[13], 1);
   VbS4 = vec_splat(Vb[14], 0);
   VbS5 = vec_splat(Vb[14], 1);
   VbS6 = vec_splat(Vb[15], 0);
   VbS7 = vec_splat(Vb[15], 1);
   Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]);
   Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]);
   Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]);
   Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]);
   Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]);
   Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]);
   Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]);
   Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]);
   Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]);
   Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]);
   Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]);
   Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]);
   Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]);
   Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]);
   Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]);
   Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]);

   /* ---- row 4: pivot at a[36]; rows 6..7 vector (Va[19] = a[38..39]),
      row 5 scalar (shares a vector with row 4). */
   b[32] = (c0[4] *= a[36]);
   b[33] = (c1[4] *= a[36]);
   b[34] = (c2[4] *= a[36]);
   b[35] = (c3[4] *= a[36]);
   b[36] = (c4[4] *= a[36]);
   b[37] = (c5[4] *= a[36]);
   b[38] = (c6[4] *= a[36]);
   b[39] = (c7[4] *= a[36]);
   VbS0 = vec_splat(Vb[16], 0);
   VbS1 = vec_splat(Vb[16], 1);
   VbS2 = vec_splat(Vb[17], 0);
   VbS3 = vec_splat(Vb[17], 1);
   VbS4 = vec_splat(Vb[18], 0);
   VbS5 = vec_splat(Vb[18], 1);
   VbS6 = vec_splat(Vb[19], 0);
   VbS7 = vec_splat(Vb[19], 1);
   Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]);
   Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]);
   Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]);
   Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]);
   Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]);
   Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]);
   Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]);
   Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]);
   c0[5] -= c0[4] * a[37];
   c1[5] -= c1[4] * a[37];
   c2[5] -= c2[4] * a[37];
   c3[5] -= c3[4] * a[37];
   c4[5] -= c4[4] * a[37];
   c5[5] -= c5[4] * a[37];
   c6[5] -= c6[4] * a[37];
   c7[5] -= c7[4] * a[37];

   /* ---- row 5: pivot at a[45]; rows 6..7 vector (Va[23] = a[46..47]). */
   b[40] = (c0[5] *= a[45]);
   b[41] = (c1[5] *= a[45]);
   b[42] = (c2[5] *= a[45]);
   b[43] = (c3[5] *= a[45]);
   b[44] = (c4[5] *= a[45]);
   b[45] = (c5[5] *= a[45]);
   b[46] = (c6[5] *= a[45]);
   b[47] = (c7[5] *= a[45]);
   VbS0 = vec_splat(Vb[20], 0);
   VbS1 = vec_splat(Vb[20], 1);
   VbS2 = vec_splat(Vb[21], 0);
   VbS3 = vec_splat(Vb[21], 1);
   VbS4 = vec_splat(Vb[22], 0);
   VbS5 = vec_splat(Vb[22], 1);
   VbS6 = vec_splat(Vb[23], 0);
   VbS7 = vec_splat(Vb[23], 1);
   Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]);
   Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]);
   Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]);
   Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]);
   Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]);
   Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]);
   Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]);
   Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]);

   /* ---- row 6: pivot at a[54]; only row 7 remains (scalar, a[55]). */
   b[48] = (c0[6] *= a[54]);
   b[49] = (c1[6] *= a[54]);
   b[50] = (c2[6] *= a[54]);
   b[51] = (c3[6] *= a[54]);
   b[52] = (c4[6] *= a[54]);
   b[53] = (c5[6] *= a[54]);
   b[54] = (c6[6] *= a[54]);
   b[55] = (c7[6] *= a[54]);
   c0[7] -= c0[6] * a[55];
   c1[7] -= c1[6] * a[55];
   c2[7] -= c2[6] * a[55];
   c3[7] -= c3[6] * a[55];
   c4[7] -= c4[6] * a[55];
   c5[7] -= c5[6] * a[55];
   c6[7] -= c6[6] * a[55];
   c7[7] -= c7[6] * a[55];

   /* ---- row 7: final pivot at a[63]; nothing left to eliminate. */
   b[56] = (c0[7] *= a[63]);
   b[57] = (c1[7] *= a[63]);
   b[58] = (c2[7] *= a[63]);
   b[59] = (c3[7] *= a[63]);
   b[60] = (c4[7] *= a[63]);
   b[61] = (c5[7] *= a[63]);
   b[62] = (c6[7] *= a[63]);
   b[63] = (c7[7] *= a[63]);
}
368 
369 #else
370 
solve16x8(FLOAT * a,FLOAT * b,FLOAT * c,BLASLONG ldc)371 static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
372    FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
373    c0 = &c[0*ldc];
374    c1 = &c[1*ldc];
375    c2 = &c[2*ldc];
376    c3 = &c[3*ldc];
377    c4 = &c[4*ldc];
378    c5 = &c[5*ldc];
379    c6 = &c[6*ldc];
380    c7 = &c[7*ldc];
381 
382    vector FLOAT *Va = (vector FLOAT *) a;
383    vector FLOAT *Vb = (vector FLOAT *) b;
384    vector FLOAT *Vc0 = (vector FLOAT *) c0;
385    vector FLOAT *Vc1 = (vector FLOAT *) c1;
386    vector FLOAT *Vc2 = (vector FLOAT *) c2;
387    vector FLOAT *Vc3 = (vector FLOAT *) c3;
388    vector FLOAT *Vc4 = (vector FLOAT *) c4;
389    vector FLOAT *Vc5 = (vector FLOAT *) c5;
390    vector FLOAT *Vc6 = (vector FLOAT *) c6;
391    vector FLOAT *Vc7 = (vector FLOAT *) c7;
392    vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
393 
394    b[0] = (c0[0] *= a[0]);
395    b[1] = (c1[0] *= a[0]);
396    b[2] = (c2[0] *= a[0]);
397    b[3] = (c3[0] *= a[0]);
398    b[4] = (c4[0] *= a[0]);
399    b[5] = (c5[0] *= a[0]);
400    b[6] = (c6[0] *= a[0]);
401    b[7] = (c7[0] *= a[0]);
402    VbS0 = vec_splat(Vb[0], 0);
403    VbS1 = vec_splat(Vb[0], 1);
404    VbS2 = vec_splat(Vb[0], 2);
405    VbS3 = vec_splat(Vb[0], 3);
406    VbS4 = vec_splat(Vb[1], 0);
407    VbS5 = vec_splat(Vb[1], 1);
408    VbS6 = vec_splat(Vb[1], 2);
409    VbS7 = vec_splat(Vb[1], 3);
410    Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]);
411    Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]);
412    Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]);
413    Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]);
414    Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]);
415    Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]);
416    Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]);
417    Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]);
418    Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]);
419    Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]);
420    Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]);
421    Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]);
422    Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]);
423    Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]);
424    Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]);
425    Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]);
426    Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]);
427    Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]);
428    Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]);
429    Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]);
430    Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]);
431    Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]);
432    Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]);
433    Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]);
434    c0[1] -= b[0] * a[ 1];
435    c0[2] -= b[0] * a[ 2];
436    c0[3] -= b[0] * a[ 3];
437    c1[1] -= b[1] * a[ 1];
438    c1[2] -= b[1] * a[ 2];
439    c1[3] -= b[1] * a[ 3];
440    c2[1] -= b[2] * a[ 1];
441    c2[2] -= b[2] * a[ 2];
442    c2[3] -= b[2] * a[ 3];
443    c3[1] -= b[3] * a[ 1];
444    c3[2] -= b[3] * a[ 2];
445    c3[3] -= b[3] * a[ 3];
446    c4[1] -= b[4] * a[ 1];
447    c4[2] -= b[4] * a[ 2];
448    c4[3] -= b[4] * a[ 3];
449    c5[1] -= b[5] * a[ 1];
450    c5[2] -= b[5] * a[ 2];
451    c5[3] -= b[5] * a[ 3];
452    c6[1] -= b[6] * a[ 1];
453    c6[2] -= b[6] * a[ 2];
454    c6[3] -= b[6] * a[ 3];
455    c7[1] -= b[7] * a[ 1];
456    c7[2] -= b[7] * a[ 2];
457    c7[3] -= b[7] * a[ 3];
458 
459    b[ 8] = (c0[1] *= a[17]);
460    b[ 9] = (c1[1] *= a[17]);
461    b[10] = (c2[1] *= a[17]);
462    b[11] = (c3[1] *= a[17]);
463    b[12] = (c4[1] *= a[17]);
464    b[13] = (c5[1] *= a[17]);
465    b[14] = (c6[1] *= a[17]);
466    b[15] = (c7[1] *= a[17]);
467    VbS0 = vec_splat(Vb[2], 0);
468    VbS1 = vec_splat(Vb[2], 1);
469    VbS2 = vec_splat(Vb[2], 2);
470    VbS3 = vec_splat(Vb[2], 3);
471    VbS4 = vec_splat(Vb[3], 0);
472    VbS5 = vec_splat(Vb[3], 1);
473    VbS6 = vec_splat(Vb[3], 2);
474    VbS7 = vec_splat(Vb[3], 3);
475    Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]);
476    Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]);
477    Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]);
478    Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]);
479    Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]);
480    Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]);
481    Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]);
482    Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]);
483    Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]);
484    Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]);
485    Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]);
486    Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]);
487    Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]);
488    Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]);
489    Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]);
490    Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]);
491    Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]);
492    Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]);
493    Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]);
494    Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]);
495    Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]);
496    Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]);
497    Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]);
498    Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]);
499    c0[2] -= b[ 8] * a[18];
500    c0[3] -= b[ 8] * a[19];
501    c1[2] -= b[ 9] * a[18];
502    c1[3] -= b[ 9] * a[19];
503    c2[2] -= b[10] * a[18];
504    c2[3] -= b[10] * a[19];
505    c3[2] -= b[11] * a[18];
506    c3[3] -= b[11] * a[19];
507    c4[2] -= b[12] * a[18];
508    c4[3] -= b[12] * a[19];
509    c5[2] -= b[13] * a[18];
510    c5[3] -= b[13] * a[19];
511    c6[2] -= b[14] * a[18];
512    c6[3] -= b[14] * a[19];
513    c7[2] -= b[15] * a[18];
514    c7[3] -= b[15] * a[19];
515 
516    b[16] = (c0[2] *= a[34]);
517    b[17] = (c1[2] *= a[34]);
518    b[18] = (c2[2] *= a[34]);
519    b[19] = (c3[2] *= a[34]);
520    b[20] = (c4[2] *= a[34]);
521    b[21] = (c5[2] *= a[34]);
522    b[22] = (c6[2] *= a[34]);
523    b[23] = (c7[2] *= a[34]);
524    VbS0 = vec_splat(Vb[4], 0);
525    VbS1 = vec_splat(Vb[4], 1);
526    VbS2 = vec_splat(Vb[4], 2);
527    VbS3 = vec_splat(Vb[4], 3);
528    VbS4 = vec_splat(Vb[5], 0);
529    VbS5 = vec_splat(Vb[5], 1);
530    VbS6 = vec_splat(Vb[5], 2);
531    VbS7 = vec_splat(Vb[5], 3);
532    Vc0[1] = vec_nmsub(VbS0, Va[ 9], Vc0[1]);
533    Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]);
534    Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]);
535    Vc1[1] = vec_nmsub(VbS1, Va[ 9], Vc1[1]);
536    Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]);
537    Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]);
538    Vc2[1] = vec_nmsub(VbS2, Va[ 9], Vc2[1]);
539    Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]);
540    Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]);
541    Vc3[1] = vec_nmsub(VbS3, Va[ 9], Vc3[1]);
542    Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]);
543    Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]);
544    Vc4[1] = vec_nmsub(VbS4, Va[ 9], Vc4[1]);
545    Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]);
546    Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]);
547    Vc5[1] = vec_nmsub(VbS5, Va[ 9], Vc5[1]);
548    Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]);
549    Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]);
550    Vc6[1] = vec_nmsub(VbS6, Va[ 9], Vc6[1]);
551    Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]);
552    Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]);
553    Vc7[1] = vec_nmsub(VbS7, Va[ 9], Vc7[1]);
554    Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]);
555    Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]);
556    c0[3] -= b[16] * a[35];
557    c1[3] -= b[17] * a[35];
558    c2[3] -= b[18] * a[35];
559    c3[3] -= b[19] * a[35];
560    c4[3] -= b[20] * a[35];
561    c5[3] -= b[21] * a[35];
562    c6[3] -= b[22] * a[35];
563    c7[3] -= b[23] * a[35];
564 
565    b[24] = (c0[3] *= a[51]);
566    b[25] = (c1[3] *= a[51]);
567    b[26] = (c2[3] *= a[51]);
568    b[27] = (c3[3] *= a[51]);
569    b[28] = (c4[3] *= a[51]);
570    b[29] = (c5[3] *= a[51]);
571    b[30] = (c6[3] *= a[51]);
572    b[31] = (c7[3] *= a[51]);
573    VbS0 = vec_splat(Vb[6], 0);
574    VbS1 = vec_splat(Vb[6], 1);
575    VbS2 = vec_splat(Vb[6], 2);
576    VbS3 = vec_splat(Vb[6], 3);
577    VbS4 = vec_splat(Vb[7], 0);
578    VbS5 = vec_splat(Vb[7], 1);
579    VbS6 = vec_splat(Vb[7], 2);
580    VbS7 = vec_splat(Vb[7], 3);
581    Vc0[1] = vec_nmsub(VbS0, Va[13], Vc0[1]);
582    Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]);
583    Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]);
584    Vc1[1] = vec_nmsub(VbS1, Va[13], Vc1[1]);
585    Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]);
586    Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]);
587    Vc2[1] = vec_nmsub(VbS2, Va[13], Vc2[1]);
588    Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]);
589    Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]);
590    Vc3[1] = vec_nmsub(VbS3, Va[13], Vc3[1]);
591    Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]);
592    Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]);
593    Vc4[1] = vec_nmsub(VbS4, Va[13], Vc4[1]);
594    Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]);
595    Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]);
596    Vc5[1] = vec_nmsub(VbS5, Va[13], Vc5[1]);
597    Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]);
598    Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]);
599    Vc6[1] = vec_nmsub(VbS6, Va[13], Vc6[1]);
600    Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]);
601    Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]);
602    Vc7[1] = vec_nmsub(VbS7, Va[13], Vc7[1]);
603    Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]);
604    Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]);
605 
606    b[32] = (c0[4] *= a[68]);
607    b[33] = (c1[4] *= a[68]);
608    b[34] = (c2[4] *= a[68]);
609    b[35] = (c3[4] *= a[68]);
610    b[36] = (c4[4] *= a[68]);
611    b[37] = (c5[4] *= a[68]);
612    b[38] = (c6[4] *= a[68]);
613    b[39] = (c7[4] *= a[68]);
614    VbS0 = vec_splat(Vb[8], 0);
615    VbS1 = vec_splat(Vb[8], 1);
616    VbS2 = vec_splat(Vb[8], 2);
617    VbS3 = vec_splat(Vb[8], 3);
618    VbS4 = vec_splat(Vb[9], 0);
619    VbS5 = vec_splat(Vb[9], 1);
620    VbS6 = vec_splat(Vb[9], 2);
621    VbS7 = vec_splat(Vb[9], 3);
622    Vc0[2] = vec_nmsub(VbS0, Va[18], Vc0[2]);
623    Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]);
624    Vc1[2] = vec_nmsub(VbS1, Va[18], Vc1[2]);
625    Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]);
626    Vc2[2] = vec_nmsub(VbS2, Va[18], Vc2[2]);
627    Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]);
628    Vc3[2] = vec_nmsub(VbS3, Va[18], Vc3[2]);
629    Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]);
630    Vc4[2] = vec_nmsub(VbS4, Va[18], Vc4[2]);
631    Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]);
632    Vc5[2] = vec_nmsub(VbS5, Va[18], Vc5[2]);
633    Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]);
634    Vc6[2] = vec_nmsub(VbS6, Va[18], Vc6[2]);
635    Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]);
636    Vc7[2] = vec_nmsub(VbS7, Va[18], Vc7[2]);
637    Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]);
638    c0[5] -= b[32] * a[69];
639    c0[6] -= b[32] * a[70];
640    c0[7] -= b[32] * a[71];
641    c1[5] -= b[33] * a[69];
642    c1[6] -= b[33] * a[70];
643    c1[7] -= b[33] * a[71];
644    c2[5] -= b[34] * a[69];
645    c2[6] -= b[34] * a[70];
646    c2[7] -= b[34] * a[71];
647    c3[5] -= b[35] * a[69];
648    c3[6] -= b[35] * a[70];
649    c3[7] -= b[35] * a[71];
650    c4[5] -= b[36] * a[69];
651    c4[6] -= b[36] * a[70];
652    c4[7] -= b[36] * a[71];
653    c5[5] -= b[37] * a[69];
654    c5[6] -= b[37] * a[70];
655    c5[7] -= b[37] * a[71];
656    c6[5] -= b[38] * a[69];
657    c6[6] -= b[38] * a[70];
658    c6[7] -= b[38] * a[71];
659    c7[5] -= b[39] * a[69];
660    c7[6] -= b[39] * a[70];
661    c7[7] -= b[39] * a[71];
662 
663    b[40] = (c0[5] *= a[85]);
664    b[41] = (c1[5] *= a[85]);
665    b[42] = (c2[5] *= a[85]);
666    b[43] = (c3[5] *= a[85]);
667    b[44] = (c4[5] *= a[85]);
668    b[45] = (c5[5] *= a[85]);
669    b[46] = (c6[5] *= a[85]);
670    b[47] = (c7[5] *= a[85]);
671    VbS0 = vec_splat(Vb[10], 0);
672    VbS1 = vec_splat(Vb[10], 1);
673    VbS2 = vec_splat(Vb[10], 2);
674    VbS3 = vec_splat(Vb[10], 3);
675    VbS4 = vec_splat(Vb[11], 0);
676    VbS5 = vec_splat(Vb[11], 1);
677    VbS6 = vec_splat(Vb[11], 2);
678    VbS7 = vec_splat(Vb[11], 3);
679    Vc0[2] = vec_nmsub(VbS0, Va[22], Vc0[2]);
680    Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]);
681    Vc1[2] = vec_nmsub(VbS1, Va[22], Vc1[2]);
682    Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]);
683    Vc2[2] = vec_nmsub(VbS2, Va[22], Vc2[2]);
684    Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]);
685    Vc3[2] = vec_nmsub(VbS3, Va[22], Vc3[2]);
686    Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]);
687    Vc4[2] = vec_nmsub(VbS4, Va[22], Vc4[2]);
688    Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]);
689    Vc5[2] = vec_nmsub(VbS5, Va[22], Vc5[2]);
690    Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]);
691    Vc6[2] = vec_nmsub(VbS6, Va[22], Vc6[2]);
692    Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]);
693    Vc7[2] = vec_nmsub(VbS7, Va[22], Vc7[2]);
694    Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]);
695    c0[6] -= b[40] * a[86];
696    c0[7] -= b[40] * a[87];
697    c1[6] -= b[41] * a[86];
698    c1[7] -= b[41] * a[87];
699    c2[6] -= b[42] * a[86];
700    c2[7] -= b[42] * a[87];
701    c3[6] -= b[43] * a[86];
702    c3[7] -= b[43] * a[87];
703    c4[6] -= b[44] * a[86];
704    c4[7] -= b[44] * a[87];
705    c5[6] -= b[45] * a[86];
706    c5[7] -= b[45] * a[87];
707    c6[6] -= b[46] * a[86];
708    c6[7] -= b[46] * a[87];
709    c7[6] -= b[47] * a[86];
710    c7[7] -= b[47] * a[87];
711 
712    b[48] = (c0[6] *= a[102]);
713    b[49] = (c1[6] *= a[102]);
714    b[50] = (c2[6] *= a[102]);
715    b[51] = (c3[6] *= a[102]);
716    b[52] = (c4[6] *= a[102]);
717    b[53] = (c5[6] *= a[102]);
718    b[54] = (c6[6] *= a[102]);
719    b[55] = (c7[6] *= a[102]);
720    VbS0 = vec_splat(Vb[12], 0);
721    VbS1 = vec_splat(Vb[12], 1);
722    VbS2 = vec_splat(Vb[12], 2);
723    VbS3 = vec_splat(Vb[12], 3);
724    VbS4 = vec_splat(Vb[13], 0);
725    VbS5 = vec_splat(Vb[13], 1);
726    VbS6 = vec_splat(Vb[13], 2);
727    VbS7 = vec_splat(Vb[13], 3);
728    Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]);
729    Vc0[3] = vec_nmsub(VbS0, Va[27], Vc0[3]);
730    Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]);
731    Vc1[3] = vec_nmsub(VbS1, Va[27], Vc1[3]);
732    Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]);
733    Vc2[3] = vec_nmsub(VbS2, Va[27], Vc2[3]);
734    Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]);
735    Vc3[3] = vec_nmsub(VbS3, Va[27], Vc3[3]);
736    Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]);
737    Vc4[3] = vec_nmsub(VbS4, Va[27], Vc4[3]);
738    Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]);
739    Vc5[3] = vec_nmsub(VbS5, Va[27], Vc5[3]);
740    Vc6[2] = vec_nmsub(VbS6, Va[26], Vc6[2]);
741    Vc6[3] = vec_nmsub(VbS6, Va[27], Vc6[3]);
742    Vc7[2] = vec_nmsub(VbS7, Va[26], Vc7[2]);
743    Vc7[3] = vec_nmsub(VbS7, Va[27], Vc7[3]);
744    c0[7] -= b[48] * a[103];
745    c1[7] -= b[49] * a[103];
746    c2[7] -= b[50] * a[103];
747    c3[7] -= b[51] * a[103];
748    c4[7] -= b[52] * a[103];
749    c5[7] -= b[53] * a[103];
750    c6[7] -= b[54] * a[103];
751    c7[7] -= b[55] * a[103];
752 
753    b[56] = (c0[7] *= a[119]);
754    b[57] = (c1[7] *= a[119]);
755    b[58] = (c2[7] *= a[119]);
756    b[59] = (c3[7] *= a[119]);
757    b[60] = (c4[7] *= a[119]);
758    b[61] = (c5[7] *= a[119]);
759    b[62] = (c6[7] *= a[119]);
760    b[63] = (c7[7] *= a[119]);
761    VbS0 = vec_splat(Vb[14], 0);
762    VbS1 = vec_splat(Vb[14], 1);
763    VbS2 = vec_splat(Vb[14], 2);
764    VbS3 = vec_splat(Vb[14], 3);
765    VbS4 = vec_splat(Vb[15], 0);
766    VbS5 = vec_splat(Vb[15], 1);
767    VbS6 = vec_splat(Vb[15], 2);
768    VbS7 = vec_splat(Vb[15], 3);
769    Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]);
770    Vc0[3] = vec_nmsub(VbS0, Va[31], Vc0[3]);
771    Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]);
772    Vc1[3] = vec_nmsub(VbS1, Va[31], Vc1[3]);
773    Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]);
774    Vc2[3] = vec_nmsub(VbS2, Va[31], Vc2[3]);
775    Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]);
776    Vc3[3] = vec_nmsub(VbS3, Va[31], Vc3[3]);
777    Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]);
778    Vc4[3] = vec_nmsub(VbS4, Va[31], Vc4[3]);
779    Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]);
780    Vc5[3] = vec_nmsub(VbS5, Va[31], Vc5[3]);
781    Vc6[2] = vec_nmsub(VbS6, Va[30], Vc6[2]);
782    Vc6[3] = vec_nmsub(VbS6, Va[31], Vc6[3]);
783    Vc7[2] = vec_nmsub(VbS7, Va[30], Vc7[2]);
784    Vc7[3] = vec_nmsub(VbS7, Va[31], Vc7[3]);
785 
786    b[64] = (c0[8] *= a[136]);
787    b[65] = (c1[8] *= a[136]);
788    b[66] = (c2[8] *= a[136]);
789    b[67] = (c3[8] *= a[136]);
790    b[68] = (c4[8] *= a[136]);
791    b[69] = (c5[8] *= a[136]);
792    b[70] = (c6[8] *= a[136]);
793    b[71] = (c7[8] *= a[136]);
794    VbS0 = vec_splat(Vb[16], 0);
795    VbS1 = vec_splat(Vb[16], 1);
796    VbS2 = vec_splat(Vb[16], 2);
797    VbS3 = vec_splat(Vb[16], 3);
798    VbS4 = vec_splat(Vb[17], 0);
799    VbS5 = vec_splat(Vb[17], 1);
800    VbS6 = vec_splat(Vb[17], 2);
801    VbS7 = vec_splat(Vb[17], 3);
802    Vc0[3] = vec_nmsub(VbS0, Va[35], Vc0[3]);
803    Vc1[3] = vec_nmsub(VbS1, Va[35], Vc1[3]);
804    Vc2[3] = vec_nmsub(VbS2, Va[35], Vc2[3]);
805    Vc3[3] = vec_nmsub(VbS3, Va[35], Vc3[3]);
806    Vc4[3] = vec_nmsub(VbS4, Va[35], Vc4[3]);
807    Vc5[3] = vec_nmsub(VbS5, Va[35], Vc5[3]);
808    Vc6[3] = vec_nmsub(VbS6, Va[35], Vc6[3]);
809    Vc7[3] = vec_nmsub(VbS7, Va[35], Vc7[3]);
810    c0[ 9] -= b[64] * a[137];
811    c0[10] -= b[64] * a[138];
812    c0[11] -= b[64] * a[139];
813    c1[ 9] -= b[65] * a[137];
814    c1[10] -= b[65] * a[138];
815    c1[11] -= b[65] * a[139];
816    c2[ 9] -= b[66] * a[137];
817    c2[10] -= b[66] * a[138];
818    c2[11] -= b[66] * a[139];
819    c3[ 9] -= b[67] * a[137];
820    c3[10] -= b[67] * a[138];
821    c3[11] -= b[67] * a[139];
822    c4[ 9] -= b[68] * a[137];
823    c4[10] -= b[68] * a[138];
824    c4[11] -= b[68] * a[139];
825    c5[ 9] -= b[69] * a[137];
826    c5[10] -= b[69] * a[138];
827    c5[11] -= b[69] * a[139];
828    c6[ 9] -= b[70] * a[137];
829    c6[10] -= b[70] * a[138];
830    c6[11] -= b[70] * a[139];
831    c7[ 9] -= b[71] * a[137];
832    c7[10] -= b[71] * a[138];
833    c7[11] -= b[71] * a[139];
834 
835    b[72] = (c0[9] *= a[153]);
836    b[73] = (c1[9] *= a[153]);
837    b[74] = (c2[9] *= a[153]);
838    b[75] = (c3[9] *= a[153]);
839    b[76] = (c4[9] *= a[153]);
840    b[77] = (c5[9] *= a[153]);
841    b[78] = (c6[9] *= a[153]);
842    b[79] = (c7[9] *= a[153]);
843    VbS0 = vec_splat(Vb[18], 0);
844    VbS1 = vec_splat(Vb[18], 1);
845    VbS2 = vec_splat(Vb[18], 2);
846    VbS3 = vec_splat(Vb[18], 3);
847    VbS4 = vec_splat(Vb[19], 0);
848    VbS5 = vec_splat(Vb[19], 1);
849    VbS6 = vec_splat(Vb[19], 2);
850    VbS7 = vec_splat(Vb[19], 3);
851    Vc0[3] = vec_nmsub(VbS0, Va[39], Vc0[3]);
852    Vc1[3] = vec_nmsub(VbS1, Va[39], Vc1[3]);
853    Vc2[3] = vec_nmsub(VbS2, Va[39], Vc2[3]);
854    Vc3[3] = vec_nmsub(VbS3, Va[39], Vc3[3]);
855    Vc4[3] = vec_nmsub(VbS4, Va[39], Vc4[3]);
856    Vc5[3] = vec_nmsub(VbS5, Va[39], Vc5[3]);
857    Vc6[3] = vec_nmsub(VbS6, Va[39], Vc6[3]);
858    Vc7[3] = vec_nmsub(VbS7, Va[39], Vc7[3]);
859    c0[10] -= b[72] * a[154];
860    c0[11] -= b[72] * a[155];
861    c1[10] -= b[73] * a[154];
862    c1[11] -= b[73] * a[155];
863    c2[10] -= b[74] * a[154];
864    c2[11] -= b[74] * a[155];
865    c3[10] -= b[75] * a[154];
866    c3[11] -= b[75] * a[155];
867    c4[10] -= b[76] * a[154];
868    c4[11] -= b[76] * a[155];
869    c5[10] -= b[77] * a[154];
870    c5[11] -= b[77] * a[155];
871    c6[10] -= b[78] * a[154];
872    c6[11] -= b[78] * a[155];
873    c7[10] -= b[79] * a[154];
874    c7[11] -= b[79] * a[155];
875 
876    b[80] = (c0[10] *= a[170]);
877    b[81] = (c1[10] *= a[170]);
878    b[82] = (c2[10] *= a[170]);
879    b[83] = (c3[10] *= a[170]);
880    b[84] = (c4[10] *= a[170]);
881    b[85] = (c5[10] *= a[170]);
882    b[86] = (c6[10] *= a[170]);
883    b[87] = (c7[10] *= a[170]);
884    VbS0 = vec_splat(Vb[20], 0);
885    VbS1 = vec_splat(Vb[20], 1);
886    VbS2 = vec_splat(Vb[20], 2);
887    VbS3 = vec_splat(Vb[20], 3);
888    VbS4 = vec_splat(Vb[21], 0);
889    VbS5 = vec_splat(Vb[21], 1);
890    VbS6 = vec_splat(Vb[21], 2);
891    VbS7 = vec_splat(Vb[21], 3);
892    Vc0[3] = vec_nmsub(VbS0, Va[43], Vc0[3]);
893    Vc1[3] = vec_nmsub(VbS1, Va[43], Vc1[3]);
894    Vc2[3] = vec_nmsub(VbS2, Va[43], Vc2[3]);
895    Vc3[3] = vec_nmsub(VbS3, Va[43], Vc3[3]);
896    Vc4[3] = vec_nmsub(VbS4, Va[43], Vc4[3]);
897    Vc5[3] = vec_nmsub(VbS5, Va[43], Vc5[3]);
898    Vc6[3] = vec_nmsub(VbS6, Va[43], Vc6[3]);
899    Vc7[3] = vec_nmsub(VbS7, Va[43], Vc7[3]);
900    c0[11] -= b[80] * a[171];
901    c1[11] -= b[81] * a[171];
902    c2[11] -= b[82] * a[171];
903    c3[11] -= b[83] * a[171];
904    c4[11] -= b[84] * a[171];
905    c5[11] -= b[85] * a[171];
906    c6[11] -= b[86] * a[171];
907    c7[11] -= b[87] * a[171];
908 
909    b[88] = (c0[11] *= a[187]);
910    b[89] = (c1[11] *= a[187]);
911    b[90] = (c2[11] *= a[187]);
912    b[91] = (c3[11] *= a[187]);
913    b[92] = (c4[11] *= a[187]);
914    b[93] = (c5[11] *= a[187]);
915    b[94] = (c6[11] *= a[187]);
916    b[95] = (c7[11] *= a[187]);
917    VbS0 = vec_splat(Vb[22], 0);
918    VbS1 = vec_splat(Vb[22], 1);
919    VbS2 = vec_splat(Vb[22], 2);
920    VbS3 = vec_splat(Vb[22], 3);
921    VbS4 = vec_splat(Vb[23], 0);
922    VbS5 = vec_splat(Vb[23], 1);
923    VbS6 = vec_splat(Vb[23], 2);
924    VbS7 = vec_splat(Vb[23], 3);
925    Vc0[3] = vec_nmsub(VbS0, Va[47], Vc0[3]);
926    Vc1[3] = vec_nmsub(VbS1, Va[47], Vc1[3]);
927    Vc2[3] = vec_nmsub(VbS2, Va[47], Vc2[3]);
928    Vc3[3] = vec_nmsub(VbS3, Va[47], Vc3[3]);
929    Vc4[3] = vec_nmsub(VbS4, Va[47], Vc4[3]);
930    Vc5[3] = vec_nmsub(VbS5, Va[47], Vc5[3]);
931    Vc6[3] = vec_nmsub(VbS6, Va[47], Vc6[3]);
932    Vc7[3] = vec_nmsub(VbS7, Va[47], Vc7[3]);
933 
934    b[ 96] = (c0[12] *= a[204]);
935    b[ 97] = (c1[12] *= a[204]);
936    b[ 98] = (c2[12] *= a[204]);
937    b[ 99] = (c3[12] *= a[204]);
938    b[100] = (c4[12] *= a[204]);
939    b[101] = (c5[12] *= a[204]);
940    b[102] = (c6[12] *= a[204]);
941    b[103] = (c7[12] *= a[204]);
942    c0[13] -= b[ 96] * a[205];
943    c0[14] -= b[ 96] * a[206];
944    c0[15] -= b[ 96] * a[207];
945    c1[13] -= b[ 97] * a[205];
946    c1[14] -= b[ 97] * a[206];
947    c1[15] -= b[ 97] * a[207];
948    c2[13] -= b[ 98] * a[205];
949    c2[14] -= b[ 98] * a[206];
950    c2[15] -= b[ 98] * a[207];
951    c3[13] -= b[ 99] * a[205];
952    c3[14] -= b[ 99] * a[206];
953    c3[15] -= b[ 99] * a[207];
954    c4[13] -= b[100] * a[205];
955    c4[14] -= b[100] * a[206];
956    c4[15] -= b[100] * a[207];
957    c5[13] -= b[101] * a[205];
958    c5[14] -= b[101] * a[206];
959    c5[15] -= b[101] * a[207];
960    c6[13] -= b[102] * a[205];
961    c6[14] -= b[102] * a[206];
962    c6[15] -= b[102] * a[207];
963    c7[13] -= b[103] * a[205];
964    c7[14] -= b[103] * a[206];
965    c7[15] -= b[103] * a[207];
966 
967    b[104] = (c0[13] *= a[221]);
968    b[105] = (c1[13] *= a[221]);
969    b[106] = (c2[13] *= a[221]);
970    b[107] = (c3[13] *= a[221]);
971    b[108] = (c4[13] *= a[221]);
972    b[109] = (c5[13] *= a[221]);
973    b[110] = (c6[13] *= a[221]);
974    b[111] = (c7[13] *= a[221]);
975    c0[14] -= b[104] * a[222];
976    c0[15] -= b[104] * a[223];
977    c1[14] -= b[105] * a[222];
978    c1[15] -= b[105] * a[223];
979    c2[14] -= b[106] * a[222];
980    c2[15] -= b[106] * a[223];
981    c3[14] -= b[107] * a[222];
982    c3[15] -= b[107] * a[223];
983    c4[14] -= b[108] * a[222];
984    c4[15] -= b[108] * a[223];
985    c5[14] -= b[109] * a[222];
986    c5[15] -= b[109] * a[223];
987    c6[14] -= b[110] * a[222];
988    c6[15] -= b[110] * a[223];
989    c7[14] -= b[111] * a[222];
990    c7[15] -= b[111] * a[223];
991 
992    b[112] = (c0[14] *= a[238]);
993    b[113] = (c1[14] *= a[238]);
994    b[114] = (c2[14] *= a[238]);
995    b[115] = (c3[14] *= a[238]);
996    b[116] = (c4[14] *= a[238]);
997    b[117] = (c5[14] *= a[238]);
998    b[118] = (c6[14] *= a[238]);
999    b[119] = (c7[14] *= a[238]);
1000    c0[15] -= b[112] * a[239];
1001    c1[15] -= b[113] * a[239];
1002    c2[15] -= b[114] * a[239];
1003    c3[15] -= b[115] * a[239];
1004    c4[15] -= b[116] * a[239];
1005    c5[15] -= b[117] * a[239];
1006    c6[15] -= b[118] * a[239];
1007    c7[15] -= b[119] * a[239];
1008 
1009    b[120] = (c0[15] *= a[255]);
1010    b[121] = (c1[15] *= a[255]);
1011    b[122] = (c2[15] *= a[255]);
1012    b[123] = (c3[15] *= a[255]);
1013    b[124] = (c4[15] *= a[255]);
1014    b[125] = (c5[15] *= a[255]);
1015    b[126] = (c6[15] *= a[255]);
1016    b[127] = (c7[15] *= a[255]);
1017 }
1018 
1019 #endif
1020 
/* Scalar fallback triangular solve (real case, forward substitution).
 *
 * a   : m x m packed triangular factor, one column of m entries per row step;
 *       a[row] is the diagonal entry.  Multiplication (not division) is used,
 *       so the diagonal is presumably stored pre-inverted by the packing
 *       routine -- TODO confirm against the trsm packing code.
 * b   : output buffer; receives the solved entries in row-major visit order.
 * c   : m x n right-hand side / solution matrix with leading dimension ldc;
 *       updated in place.
 */
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {

  FLOAT diag, pivot;
  int row, col, below;

  for (row = 0; row < m; row++) {

    diag = a[row];

    for (col = 0; col < n; col++) {
      FLOAT *ccol = c + col * ldc;

      /* scale the current entry by the (inverted) diagonal */
      pivot = ccol[row];
      pivot *= diag;

      *b = pivot;
      ccol[row] = pivot;
      b++;

      /* eliminate this entry from every row below */
      for (below = row + 1; below < m; below++) {
        ccol[below] -= pivot * a[below];
      }

    }
    a += m;   /* advance to the next packed column of A */
  }
}
1046 
1047 #else
1048 
/* Scalar fallback triangular solve (complex case, forward substitution).
 *
 * Elements are interleaved (re, im) pairs, so every index is doubled and
 * ldc is widened to count FLOATs.  The diagonal is multiplied in rather
 * than divided, so it is presumably stored pre-inverted by the packing
 * routine -- TODO confirm.  CONJ selects the conjugated product variant.
 */
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {

  FLOAT ar, ai;   /* diagonal element (re, im) */
  FLOAT xr, xi;   /* solved entry     (re, im) */
  int row, col, k;

  ldc *= 2;       /* two FLOATs per complex element */

  for (row = 0; row < m; row++) {

    ar = a[row * 2 + 0];
    ai = a[row * 2 + 1];

    for (col = 0; col < n; col++) {
      FLOAT *cur = c + row * 2 + col * ldc;
      FLOAT cr = cur[0];
      FLOAT ci = cur[1];

      /* x = diag * c (complex multiply; conjugated form under CONJ) */
#ifndef CONJ
      xr = ar * cr - ai * ci;
      xi = ar * ci + ai * cr;
#else
      xr = ar * cr + ai * ci;
      xi = ar * ci - ai * cr;
#endif

      b[0] = xr;
      b[1] = xi;
      cur[0] = xr;
      cur[1] = xi;
      b += 2;

      /* eliminate this entry from every row below */
      for (k = row + 1; k < m; k++) {
        FLOAT *ck = c + k * 2 + col * ldc;
#ifndef CONJ
        ck[0] -= xr * a[k * 2 + 0] - xi * a[k * 2 + 1];
        ck[1] -= xr * a[k * 2 + 1] + xi * a[k * 2 + 0];
#else
        ck[0] -= xr * a[k * 2 + 0] + xi * a[k * 2 + 1];
        ck[1] -= -xr * a[k * 2 + 1] + xi * a[k * 2 + 0];
#endif
      }

    }
    a += m * 2;   /* advance to the next packed column of A */
  }
}
1096 
1097 #endif
1098 
1099 
/* TRSM macro kernel, "LT" variant (per the debug fprintf below).
 *
 * Tiles the m x n problem into GEMM_UNROLL_M x GEMM_UNROLL_N panels.
 * For each panel: first a GEMM update with the kk already-solved rows
 * (GEMM_KERNEL called with scalar dm1, i.e. C -= A*B -- dm1 presumably
 * holds -1; defined outside this view, TODO confirm), then the small
 * triangular solve on the diagonal block.  The fast vectorized
 * solve8x8/solve16x8 path is taken only for full-size panels when `a`
 * is 8-byte aligned; otherwise the generic scalar solve() runs.
 *
 * a      : packed triangular matrix panels (k rows deep per M-panel)
 * b      : packed right-hand-side buffer, also receives solved values
 * c      : m x n output, leading dimension ldc
 * offset : starting row offset of the triangular part (seeds kk)
 */
int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1,
#ifdef COMPLEX
	   FLOAT dummy2,
#endif
	   FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){

  FLOAT *aa, *cc;          /* walking pointers into a and c for the current panel column */
  BLASLONG  kk;            /* number of rows already solved (depth of the GEMM update) */
  BLASLONG i, j, jj;

#if 0
  fprintf(stderr, "TRSM KERNEL LT : m = %3ld  n = %3ld  k = %3ld offset = %3ld\n",
	  m, n, k, offset);
#endif

  jj = 0;

  j = (n >> GEMM_UNROLL_N_SHIFT);   /* number of full N-panels */

  /* Fast path requires the unroll sizes the hand-written solve8x8/solve16x8
     kernels were built for, plus 8-byte alignment of a. */
#ifdef DOUBLE
  int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#else
  int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#endif

  /* ---- full GEMM_UNROLL_N-wide panel columns ---- */
  while (j > 0) {

    kk = offset;
    aa = a;
    cc = c;

    i = (m >> GEMM_UNROLL_M_SHIFT);   /* number of full M-panels */

    while (i > 0) {

	/* rank-kk update: subtract contribution of already-solved rows */
	if (kk > 0) {
	  GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1,
#ifdef COMPLEX
		      ZERO,
#endif
		      aa, b, cc, ldc);
	}

      /* triangular solve of the diagonal block; aa/b are advanced past
         the kk rows consumed by the GEMM update */
      if (well_aligned) {
#ifdef DOUBLE
	solve8x8(aa + kk * GEMM_UNROLL_M * COMPSIZE,
		 b  + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#else
	solve16x8(aa + kk * GEMM_UNROLL_M * COMPSIZE,
		  b  + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#endif
      }
      else {
	solve(GEMM_UNROLL_M, GEMM_UNROLL_N,
	      aa + kk * GEMM_UNROLL_M * COMPSIZE,
	      b  + kk * GEMM_UNROLL_N * COMPSIZE,
	      cc, ldc);
      }

      aa += GEMM_UNROLL_M * k * COMPSIZE;
      cc += GEMM_UNROLL_M     * COMPSIZE;
      kk += GEMM_UNROLL_M;              /* these rows are now solved */
      i --;
    }

    /* ---- leftover rows (m not a multiple of GEMM_UNROLL_M): halve i down ---- */
    if (m & (GEMM_UNROLL_M - 1)) {
      i = (GEMM_UNROLL_M >> 1);
      while (i > 0) {
	if (m & i) {
	    if (kk > 0) {
	      GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1,
#ifdef COMPLEX
			  ZERO,
#endif
			  aa, b, cc, ldc);
	    }
	  solve(i, GEMM_UNROLL_N,
		aa + kk * i             * COMPSIZE,
		b  + kk * GEMM_UNROLL_N * COMPSIZE,
		cc, ldc);

	  aa += i * k * COMPSIZE;
	  cc += i     * COMPSIZE;
	  kk += i;
	}
	i >>= 1;
      }
    }

    b += GEMM_UNROLL_N * k   * COMPSIZE;
    c += GEMM_UNROLL_N * ldc * COMPSIZE;
    j --;
    jj += GEMM_UNROLL_M;   /* NOTE(review): jj is never read in this function;
                              the GEMM_UNROLL_M increment looks vestigial -- confirm */
  }

  /* ---- leftover columns (n not a multiple of GEMM_UNROLL_N): halve j down ---- */
  if (n & (GEMM_UNROLL_N - 1)) {

    j = (GEMM_UNROLL_N >> 1);
    while (j > 0) {
      if (n & j) {

	kk = offset;
	aa = a;
	cc = c;

	i = (m >> GEMM_UNROLL_M_SHIFT);

	while (i > 0) {
	  if (kk > 0) {
	    GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1,
#ifdef COMPLEX
			ZERO,
#endif
			aa,
			b,
			cc,
			ldc);
	  }

	  /* narrow panels always use the generic solve (no vector kernel) */
	  solve(GEMM_UNROLL_M, j,
		aa + kk * GEMM_UNROLL_M * COMPSIZE,
		b  + kk * j             * COMPSIZE, cc, ldc);

	  aa += GEMM_UNROLL_M * k * COMPSIZE;
	  cc += GEMM_UNROLL_M     * COMPSIZE;
	  kk += GEMM_UNROLL_M;
	  i --;
	}

	if (m & (GEMM_UNROLL_M - 1)) {
	  i = (GEMM_UNROLL_M >> 1);
	  while (i > 0) {
	    if (m & i) {
	      if (kk > 0) {
		GEMM_KERNEL(i, j, kk, dm1,
#ifdef COMPLEX
			    ZERO,
#endif
			    aa,
			    b,
			    cc,
			    ldc);
	      }

	      solve(i, j,
		    aa + kk * i * COMPSIZE,
		    b  + kk * j * COMPSIZE, cc, ldc);

	      aa += i * k * COMPSIZE;
	      cc += i     * COMPSIZE;
	      kk += i;
	      }
	    i >>= 1;
	  }
	}

	b += j * k   * COMPSIZE;
	c += j * ldc * COMPSIZE;
      }
      j >>= 1;
    }
  }

  return 0;
}
1265