/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/


/* need a new enough GCC or clang for the AVX2 (and optional AVX-512) intrinsics used below */
#if (( defined(__GNUC__)  && __GNUC__   > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6))

#include <immintrin.h>

#define HAVE_KERNEL_4x4 1

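/*
 * The two version-specific "optimize off" pragmas below are assumed to be
 * workarounds for miscompilation of this kernel by clang 9.0.0 and
 * Apple clang 11.0.3; optimization is turned back on after the kernel.
 */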
#if defined(__clang_patchlevel__) && __clang_major__ == 9 && __clang_minor__ == 0 && __clang_patchlevel__ == 0
#pragma clang optimize off
#endif
#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3
#pragma clang optimize off
#endif
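/*
 * One 4-column block of the DSYMV update over the index range [from, to):
 *   y[j]     += temp1[0]*a[0][j] + temp1[1]*a[1][j] + temp1[2]*a[2][j] + temp1[3]*a[3][j]
 *   temp2[i] += sum over j of x[j]*a[i][j]          (i = 0..3)
 * a[0..3] are expected to point to four vectors of matrix data, one per
 * column handled by this kernel.
 */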
static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FLOAT *y, FLOAT *temp1, FLOAT *temp2)
{


	__m256d accum_0, accum_1, accum_2, accum_3;
	__m256d temp1_0, temp1_1, temp1_2, temp1_3;

	/* the 256 bit wide accumulator vectors start out as zero */
	accum_0 = _mm256_setzero_pd();
	accum_1 = _mm256_setzero_pd();
	accum_2 = _mm256_setzero_pd();
	accum_3 = _mm256_setzero_pd();

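	/* broadcast the four temp1 scalars so each one can multiply a full vector of column data */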
	temp1_0 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[0]));
	temp1_1 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[1]));
	temp1_2 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[2]));
	temp1_3 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[3]));

#ifdef __AVX512CD__
	__m512d accum_05, accum_15, accum_25, accum_35;
	__m512d temp1_05, temp1_15, temp1_25, temp1_35;
	BLASLONG to2;
	int delta;

	/* the 512 bit wide accumulator vectors start out as zero */
	accum_05 = _mm512_setzero_pd();
	accum_15 = _mm512_setzero_pd();
	accum_25 = _mm512_setzero_pd();
	accum_35 = _mm512_setzero_pd();

	temp1_05 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[0]));
	temp1_15 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[1]));
	temp1_25 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[2]));
	temp1_35 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[3]));

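	/* round the trip count down to a multiple of 8 so the 512 bit loop only handles
	   full vectors; any remainder is left for the 256 bit loop below */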
	delta = (to - from) & ~7;
	to2 = from + delta;


	for (; from < to2; from += 8) {
		__m512d _x, _y;
		__m512d a0, a1, a2, a3;

		_y = _mm512_loadu_pd(&y[from]);
		_x = _mm512_loadu_pd(&x[from]);

		a0 = _mm512_loadu_pd(&a[0][from]);
		a1 = _mm512_loadu_pd(&a[1][from]);
		a2 = _mm512_loadu_pd(&a[2][from]);
		a3 = _mm512_loadu_pd(&a[3][from]);

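		/* arithmetic operators on __m512d values are GCC/clang vector extensions;
		   the compiler lowers these multiply-adds to vmul/vfmadd/vadd instructions */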
		_y += temp1_05 * a0 + temp1_15 * a1 + temp1_25 * a2 + temp1_35 * a3;

		accum_05 += _x * a0;
		accum_15 += _x * a1;
		accum_25 += _x * a2;
		accum_35 += _x * a3;

		_mm512_storeu_pd(&y[from], _y);

	}

	/*
	 * we need to fold our 512 bit wide accumulator vectors into 256 bit wide vectors so that the AVX2 code
	 * below can continue using the intermediate results in its loop
	 */
	accum_0 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_05, 0), _mm512_extractf64x4_pd(accum_05, 1));
	accum_1 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_15, 0), _mm512_extractf64x4_pd(accum_15, 1));
	accum_2 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_25, 0), _mm512_extractf64x4_pd(accum_25, 1));
	accum_3 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_35, 0), _mm512_extractf64x4_pd(accum_35, 1));

#endif

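	/* 256 bit loop: 4 doubles per iteration; with AVX-512 enabled it only processes
	   the remainder left by the 8-wide loop above.  The != termination test assumes
	   (to - from) is a multiple of 4, which the caller is expected to guarantee. */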
	for (; from != to; from += 4) {
		__m256d _x, _y;
		__m256d a0, a1, a2, a3;

		_y = _mm256_loadu_pd(&y[from]);
		_x = _mm256_loadu_pd(&x[from]);

		/* load 4 rows of matrix data */
		a0 = _mm256_loadu_pd(&a[0][from]);
		a1 = _mm256_loadu_pd(&a[1][from]);
		a2 = _mm256_loadu_pd(&a[2][from]);
		a3 = _mm256_loadu_pd(&a[3][from]);

		_y += temp1_0 * a0 + temp1_1 * a1 + temp1_2 * a2 + temp1_3 * a3;

		accum_0 += _x * a0;
		accum_1 += _x * a1;
		accum_2 += _x * a2;
		accum_3 += _x * a3;

		_mm256_storeu_pd(&y[from], _y);

	}

	/*
	 * we now have 4 accumulator vectors. Each vector needs to be reduced to a single sum and added to the
	 * temp2 output array. There is no direct horizontal-add instruction for 256 bit vectors, only for 128 bit ones.
	 */

	__m128d half_accum0, half_accum1, half_accum2, half_accum3;


	/* Add upper half to lower half of each of the four 256 bit vectors to get to four 128 bit vectors */
	half_accum0 = _mm_add_pd(_mm256_extractf128_pd(accum_0, 0), _mm256_extractf128_pd(accum_0, 1));
	half_accum1 = _mm_add_pd(_mm256_extractf128_pd(accum_1, 0), _mm256_extractf128_pd(accum_1, 1));
	half_accum2 = _mm_add_pd(_mm256_extractf128_pd(accum_2, 0), _mm256_extractf128_pd(accum_2, 1));
	half_accum3 = _mm_add_pd(_mm256_extractf128_pd(accum_3, 0), _mm256_extractf128_pd(accum_3, 1));

	/* at 128 bit width there is a hadd operation that finishes the horizontal sum in one go */
	half_accum0 = _mm_hadd_pd(half_accum0, half_accum0);
	half_accum1 = _mm_hadd_pd(half_accum1, half_accum1);
	half_accum2 = _mm_hadd_pd(half_accum2, half_accum2);
	half_accum3 = _mm_hadd_pd(half_accum3, half_accum3);

	/* and add the lowest double value of each of these vectors to the temp2 output */
	temp2[0] += half_accum0[0];
	temp2[1] += half_accum1[0];
	temp2[2] += half_accum2[0];
	temp2[3] += half_accum3[0];
}
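
/*
 * Hypothetical usage sketch (names are illustrative, not taken from this file):
 * a driver such as the generic dsymv_L loop would set up four column pointers
 * and call the kernel for the part of the panel below the 4x4 diagonal block,
 * roughly like
 *
 *     FLOAT *ap[4];
 *     ap[0] = &a[(j + 0) * lda];
 *     ap[1] = &a[(j + 1) * lda];
 *     ap[2] = &a[(j + 2) * lda];
 *     ap[3] = &a[(j + 3) * lda];
 *     dsymv_kernel_4x4(j + 4, m, ap, x, y, tmp1, tmp2);
 *
 * with (m - (j + 4)) a multiple of 4, and tmp1/tmp2 holding the per-column
 * scalars and dot-product accumulators used by the symv algorithm.
 */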

#if defined(__clang_patchlevel__) && __clang_major__ == 9 && __clang_minor__ == 0 && __clang_patchlevel__ == 0
#pragma clang optimize on
#endif
#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3
#pragma clang optimize on
#endif

#else
#include "dsymv_L_microk_haswell-2.c"
#endif
