/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#if defined(TARGET_LINUX_POWER)
#error "Source cannot be compiled for POWER architectures"
#include "xmm2altivec.h"
#else
#include <immintrin.h>
#endif
#include "pow_defs.h"

extern "C" __m256 __fvs_pow_fma3_256(__m256 const, __m256 const);

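/*
 * Special-case fix-up for the fast path below: res_exp holds the fast-path
 * result, and lanes whose inputs hit an IEEE-754/C99 pow() special case
 * (NaN operands, bases of +-0, +-1 or +-inf, exponents of 0 or +-inf,
 * negative bases) are overwritten with the required value.  The routine
 * also raises the invalid-operation and divide-by-zero flags where pow()
 * demands them.
 */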
__m256 __attribute__ ((noinline)) __pgm_pow_vec256_dp_special_cases(__m256 res_exp, __m256 const a, __m256 const b)
{
  __m256i abs_mask = _mm256_set1_epi32(D_SIGN_MASK2);
  __m256i pos_inf = _mm256_set1_epi32(D_POS_INF);
  __m256i sign_mask = _mm256_set1_epi32(D_SIGN_MASK);
  __m256i neg_inf = _mm256_set1_epi32(D_NEG_INF);
  __m256i nan = _mm256_set1_epi32(D_NAN);
  __m256i neg_nan = _mm256_set1_epi32(D_NEG_NAN);
  __m256 MINUS_ONE_F_VEC = _mm256_set1_ps(D_MINUS_ONE_F);
  __m256 MINUS_ZERO_F_VEC = _mm256_set1_ps(D_MINUS_ZERO_F);
  __m256 const ONE_F_VEC = _mm256_set1_ps(D_ONE_F);
  __m256 const ZERO_F_VEC = _mm256_setzero_ps();

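  /* Lane-wise operand classification: NaN tests (x != x), sign of a,
   * whether b is an integer and, if so, whether it is odd or even
   * (low bit after conversion to int), plus comparisons of b with zero. */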
  __m256i b_is_nan = (__m256i)_mm256_cmp_ps(b, b, _CMP_NEQ_UQ);
  __m256i a_is_nan = (__m256i)_mm256_cmp_ps(a, a, _CMP_NEQ_UQ);
  __m256i a_is_neg = (__m256i)_mm256_cmp_ps(a, ZERO_F_VEC, _CMP_LT_OS);
  int a_is_neg_flag = _mm256_movemask_epi8((__m256i)a_is_neg);

  __m256i b_is_integer = (__m256i)_mm256_cmp_ps(b, _mm256_floor_ps(b), _CMP_EQ_OQ);
  int b_is_integer_flag = _mm256_movemask_epi8((__m256i)b_is_integer);

  __m256i b_is_odd_integer = _mm256_and_si256(b_is_integer,
                             _mm256_cmpeq_epi32(
                               _mm256_and_si256(_mm256_cvtps_epi32(b), _mm256_set1_epi32(0x1)),
                               _mm256_set1_epi32(0x1)));

  __m256i b_is_even_integer = _mm256_and_si256(b_is_integer,
                              _mm256_cmpeq_epi32(
                                _mm256_and_si256(_mm256_cvtps_epi32(b), _mm256_set1_epi32(0x1)),
                                _mm256_set1_epi32(0x0)));

  __m256i b_is_lt_zero = (__m256i)_mm256_cmp_ps(b, ZERO_F_VEC, _CMP_LT_OS);
  int b_is_lt_zero_flag = _mm256_movemask_epi8((__m256i)b_is_lt_zero);

  __m256i b_is_gt_zero = (__m256i)_mm256_cmp_ps(b, ZERO_F_VEC, _CMP_GT_OS);

  __m256i b_is_odd_integer_lt_zero = _mm256_and_si256(b_is_integer,
                                     (__m256i)_mm256_cmp_ps(b, ZERO_F_VEC, _CMP_LT_OS));

  __m256i b_is_odd_integer_gt_zero = _mm256_and_si256(b_is_integer,
                                     (__m256i)_mm256_cmp_ps(b, ZERO_F_VEC, _CMP_GT_OS));

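  /* pow(-a, b) with b an odd integer is -pow(a, b): flip the sign of the
   * fast-path result in those lanes.  This and every blend below uses the
   * branchless select idiom (mask & new) | (~mask & old). */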
  __m256i change_sign_mask = _mm256_and_si256(a_is_neg, b_is_odd_integer);
  __m256 changed_sign = _mm256_xor_ps(res_exp, (__m256)sign_mask);
  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)change_sign_mask, (__m256)changed_sign), _mm256_andnot_ps((__m256)change_sign_mask, res_exp));

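  /* A negative base with a non-integer exponent is a domain error: return a
   * negative NaN (the invalid-operation flag is raised further down). */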
  __m256i return_neg_nan_mask = _mm256_andnot_si256(b_is_integer, a_is_neg);
  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_neg_nan_mask, (__m256)neg_nan), _mm256_andnot_ps((__m256)return_neg_nan_mask, res_exp));

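  /* NaN operands are propagated quieted (OR-ing in 0x00400000 sets the
   * quiet bit); if both a and b are NaN, a's payload is the one returned. */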
  __m256i return_nan_mask = _mm256_or_si256( b_is_nan, a_is_nan);

  __m256 b_as_nan = _mm256_or_ps( _mm256_and_ps(
          (__m256)b_is_nan, _mm256_or_ps(b, (__m256)_mm256_set1_epi32(0x00400000))),
          _mm256_andnot_ps((__m256)b_is_nan, res_exp));

  __m256 a_as_nan = _mm256_or_ps( _mm256_and_ps(
          (__m256)a_is_nan, _mm256_or_ps(a, (__m256)_mm256_set1_epi32(0x00400000))),
          _mm256_andnot_ps((__m256)a_is_nan, res_exp));

  __m256 nan_to_return = _mm256_and_ps((__m256)b_is_nan, b_as_nan);
  nan_to_return = _mm256_or_ps( _mm256_and_ps((__m256)a_is_nan, a_as_nan), _mm256_andnot_ps((__m256)a_is_nan, nan_to_return));

  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_nan_mask, (__m256)nan_to_return), _mm256_andnot_ps((__m256)return_nan_mask, res_exp));

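  /* Exact bit-pattern tests for +-inf and +-0 operands, |a| vs. 1, and
   * bases of exactly +-1. */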
  __m256i b_is_neg_inf = _mm256_cmpeq_epi32( (__m256i)b, neg_inf);
  __m256i b_is_pos_inf = _mm256_cmpeq_epi32( (__m256i)b, pos_inf);
  __m256i b_is_any_inf = _mm256_or_si256( b_is_pos_inf, b_is_neg_inf);

  __m256i a_is_neg_inf = _mm256_cmpeq_epi32( (__m256i)a, neg_inf);
  __m256i a_is_pos_inf = _mm256_cmpeq_epi32( (__m256i)a, pos_inf);
  __m256i a_is_any_inf = _mm256_or_si256( a_is_pos_inf, a_is_neg_inf);

  __m256i a_is_pos_zero = _mm256_cmpeq_epi32( (__m256i)a, (__m256i)ZERO_F_VEC);
  __m256i a_is_neg_zero = _mm256_cmpeq_epi32( (__m256i)a, (__m256i)MINUS_ZERO_F_VEC);
  __m256i a_is_any_zero = _mm256_or_si256(a_is_pos_zero, a_is_neg_zero);
  int a_is_any_zero_flag = _mm256_movemask_epi8((__m256i)a_is_any_zero);

  __m256i abs_a = _mm256_and_si256( (__m256i)a, abs_mask);
  __m256 abs_a_lt_one = _mm256_cmp_ps( (__m256)abs_a, ONE_F_VEC, _CMP_LT_OS);
  __m256 abs_a_gt_one = _mm256_cmp_ps( (__m256)abs_a, ONE_F_VEC, _CMP_GT_OS);

  __m256i a_is_one_mask = _mm256_cmpeq_epi32( (__m256i)a, (__m256i)ONE_F_VEC);
  __m256i a_is_minus_one_mask = _mm256_cmpeq_epi32( (__m256i)a, (__m256i)MINUS_ONE_F_VEC);

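  /* Result 1.0: a == 1.0 (any b), b == +-0 (any a), or a == -1.0 with
   * b == +-inf or an even integer.  Result -1.0: a == -1.0 with b an odd
   * integer. */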
  __m256i return_1_mask = _mm256_or_si256( a_is_one_mask, (__m256i)_mm256_cmp_ps( b, ZERO_F_VEC, _CMP_EQ_OQ));
  return_1_mask = _mm256_or_si256(return_1_mask, _mm256_and_si256( a_is_minus_one_mask, b_is_any_inf));
  return_1_mask = _mm256_or_si256(return_1_mask, _mm256_and_si256( a_is_minus_one_mask, b_is_even_integer));

  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_1_mask, ONE_F_VEC), _mm256_andnot_ps((__m256)return_1_mask, res_exp));

  __m256i return_minus_1_mask = _mm256_and_si256( a_is_minus_one_mask, b_is_odd_integer );
  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_minus_1_mask, MINUS_ONE_F_VEC), _mm256_andnot_ps((__m256)return_minus_1_mask, res_exp));

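  /* Result -0.0: a == -inf with a negative integer b, or a == -0.0 with a
   * positive integer b; the even-integer lanes picked up here are corrected
   * by the +0.0 blend below. */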
  __m256i return_neg_zero_mask = _mm256_and_si256(a_is_neg_inf,
                                 b_is_odd_integer_lt_zero);
  return_neg_zero_mask = _mm256_or_si256(return_neg_zero_mask, _mm256_and_si256( a_is_neg_zero, b_is_odd_integer_gt_zero));
  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_neg_zero_mask, MINUS_ZERO_F_VEC), _mm256_andnot_ps((__m256)return_neg_zero_mask, res_exp));

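  /* Result +0.0: |a| > 1 with b == -inf, |a| < 1 with b == +inf,
   * a == -inf with negative non-odd-integer b, zero bases with positive b
   * (other than the -0.0 cases above), and a == +inf with negative b. */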
  __m256i return_pos_zero_mask = _mm256_and_si256( (__m256i)abs_a_gt_one, b_is_neg_inf);
  return_pos_zero_mask = _mm256_or_si256(return_pos_zero_mask, _mm256_and_si256( (__m256i)abs_a_lt_one, b_is_pos_inf));
  return_pos_zero_mask = _mm256_or_si256(return_pos_zero_mask, _mm256_and_si256( (__m256i)a_is_neg_inf, _mm256_andnot_si256(b_is_odd_integer, b_is_lt_zero)));
  return_pos_zero_mask = _mm256_or_si256(return_pos_zero_mask, _mm256_and_si256( a_is_pos_zero, b_is_odd_integer_gt_zero));
  return_pos_zero_mask = _mm256_or_si256(return_pos_zero_mask, _mm256_and_si256( a_is_any_zero, _mm256_andnot_si256(b_is_odd_integer, b_is_gt_zero)));
  return_pos_zero_mask = _mm256_or_si256(return_pos_zero_mask, _mm256_and_si256( a_is_pos_inf, b_is_lt_zero));

  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_pos_zero_mask, ZERO_F_VEC), _mm256_andnot_ps((__m256)return_pos_zero_mask, res_exp));

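  /* Result -inf: a == -inf with a positive odd integer b, or a == -0.0 with
   * a negative integer b (even-integer lanes are corrected to +inf below). */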
  __m256i return_neg_inf_mask = _mm256_and_si256(a_is_neg_inf, _mm256_and_si256(b_is_odd_integer, b_is_gt_zero));
  return_neg_inf_mask = _mm256_or_si256(return_neg_inf_mask, _mm256_and_si256(a_is_neg_zero, b_is_odd_integer_lt_zero));

  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_neg_inf_mask, (__m256)neg_inf), _mm256_andnot_ps((__m256)return_neg_inf_mask, res_exp));

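  /* Result +inf: |a| < 1 with b == -inf, |a| > 1 with b == +inf, a == +0
   * with a negative integer b, a == -inf with positive non-odd-integer b,
   * zero bases with negative non-odd-integer b, and a == +inf with
   * positive b. */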
  __m256i return_pos_inf_mask = _mm256_and_si256( (__m256i)abs_a_lt_one, b_is_neg_inf);
  return_pos_inf_mask = _mm256_or_si256(return_pos_inf_mask, _mm256_and_si256( (__m256i)abs_a_gt_one, b_is_pos_inf));
  return_pos_inf_mask = _mm256_or_si256(return_pos_inf_mask, _mm256_and_si256(a_is_pos_zero, b_is_odd_integer_lt_zero));
  return_pos_inf_mask = _mm256_or_si256(return_pos_inf_mask, _mm256_and_si256(a_is_neg_inf, _mm256_andnot_si256(b_is_odd_integer, b_is_gt_zero)));
  return_pos_inf_mask = _mm256_or_si256(return_pos_inf_mask, _mm256_and_si256(a_is_any_zero, _mm256_andnot_si256(b_is_odd_integer, b_is_lt_zero)));
  return_pos_inf_mask = _mm256_or_si256(return_pos_inf_mask, _mm256_and_si256(a_is_pos_inf, b_is_gt_zero));

  res_exp = _mm256_or_ps( _mm256_and_ps((__m256)return_pos_inf_mask, (__m256)pos_inf), _mm256_andnot_ps((__m256)return_pos_inf_mask, res_exp));

  /*
   * Before returning, see if we need to set any of the processor
   * exception flags.
   *
   * Domain error: a is negative and b is a finite non-integer; we need to
   * raise the invalid-operation flag. This can be done by taking the
   * square root of a negative number.
   *
   * Pole error: a is zero and b is negative; we need to raise the
   * divide-by-zero flag. This can be done by dividing by zero.
   */

  if (a_is_neg_flag && (!b_is_integer_flag)) {
    __m256 volatile invop = _mm256_sqrt_ps(a);
  }

  if (a_is_any_zero_flag && b_is_lt_zero_flag) {
    __m256 volatile divXzero = _mm256_div_ps(ONE_F_VEC, ZERO_F_VEC);
  }

  return res_exp;
}

__m256 __fvs_pow_fma3_256(__m256 const a, __m256 const b)
{
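  /*
   * Fast path: pow(a, b) = 2^(b * log2(|a|)).  log2(|a|) is split into the
   * integer exponent of a and a degree-10 minimax polynomial in the
   * mantissa, evaluated in double precision; the product with b is then fed
   * to a degree-6 minimax polynomial for 2^x on [-0.5, 0.5] plus exponent
   * scaling.  Lanes with special inputs (a <= 0, b == 0, a == 1, inf/NaN)
   * are detected along the way and patched at the end by
   * __pgm_pow_vec256_dp_special_cases().
   */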
  // fpminimax(log2(x),10,[|double...|],[0.5;0.9999999],relative);
  __m256d const LOG_C0_VEC = _mm256_set1_pd(LOG_C0);
  __m256d const LOG_C1_VEC = _mm256_set1_pd(LOG_C1);
  __m256d const LOG_C2_VEC = _mm256_set1_pd(LOG_C2);
  __m256d const LOG_C3_VEC = _mm256_set1_pd(LOG_C3);
  __m256d const LOG_C4_VEC = _mm256_set1_pd(LOG_C4);
  __m256d const LOG_C5_VEC = _mm256_set1_pd(LOG_C5);
  __m256d const LOG_C6_VEC = _mm256_set1_pd(LOG_C6);
  __m256d const LOG_C7_VEC = _mm256_set1_pd(LOG_C7);
  __m256d const LOG_C8_VEC = _mm256_set1_pd(LOG_C8);
  __m256d const LOG_C9_VEC = _mm256_set1_pd(LOG_C9);
  __m256d const LOG_C10_VEC = _mm256_set1_pd(LOG_C10);

  // fpminimax(exp(x*0.6931471805599453094172321214581765680755001343602552),6,[|double...|],[-0.5,0.5],relative);
  __m256d const EXP_C0_VEC = _mm256_set1_pd(EXP_C0);
  __m256d const EXP_C1_VEC = _mm256_set1_pd(EXP_C1);
  __m256d const EXP_C2_VEC = _mm256_set1_pd(EXP_C2);
  __m256d const EXP_C3_VEC = _mm256_set1_pd(EXP_C3);
  __m256d const EXP_C4_VEC = _mm256_set1_pd(EXP_C4);
  __m256d const EXP_C5_VEC = _mm256_set1_pd(EXP_C5);
  __m256d const EXP_C6_VEC = _mm256_set1_pd(EXP_C6);

  __m256 const ONE_F_VEC = _mm256_set1_ps(D_ONE_F);
  __m256 const ZERO_F_VEC = _mm256_setzero_ps();
  __m256i const ALL_ONES_EXPONENT = _mm256_set1_epi32(D_ALL_ONES_EXPONENT);

  __m256i const bit_mask2 = _mm256_set1_epi32(D_BIT_MASK2);
  __m256i exp_offset = _mm256_set1_epi32(D_EXP_OFFSET);
  __m256i const offset = _mm256_set1_epi32(D_OFFSET);

  __m256d const EXP_HI_VEC = _mm256_set1_pd(EXP_HI);
  __m256d const EXP_LO_VEC = _mm256_set1_pd(EXP_LO);
  __m256d const DBL2INT_CVT_VEC = _mm256_set1_pd(DBL2INT_CVT);

  __m256 const TWO_TO_M126_F_VEC = _mm256_set1_ps(0x1p-126f);
  __m256i const U24_VEC = _mm256_set1_epi32(D_U24);
  __m256 const TWO_TO_24_F_VEC = _mm256_set1_ps(D_TWO_TO_24_F);
  __m256i sign_mask2 = _mm256_set1_epi32(D_SIGN_MASK2);

  __m256 a_compute = _mm256_and_ps(a, (__m256)sign_mask2);

  __m256 res;
  __m128 b_hi = _mm256_extractf128_ps(b, 1);
  __m128 b_lo = _mm256_extractf128_ps(b, 0);

  __m256d b_hi_d = _mm256_cvtps_pd(b_hi);
  __m256d b_lo_d = _mm256_cvtps_pd(b_lo);

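  /* Inputs with |a| below 2^-126 (subnormal after the abs) are scaled up by
   * 2^24 so the exponent/mantissa extraction below sees a normal number;
   * the exponent offset is raised by the same 24 to compensate. */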
  __m256 mask = (__m256)_mm256_cmp_ps((__m256)a_compute, TWO_TO_M126_F_VEC, _CMP_LT_OS);
  int moved_mask = _mm256_movemask_ps(mask);
  if (moved_mask) {
    a_compute = _mm256_or_ps( _mm256_and_ps(mask, _mm256_mul_ps(a_compute, TWO_TO_24_F_VEC)), _mm256_andnot_ps(mask, a_compute));
    exp_offset = _mm256_add_epi32(exp_offset, _mm256_and_si256((__m256i)mask, U24_VEC));
  }

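  /* Integer part of log2(|a|): the biased exponent field shifted down,
   * minus exp_offset (which also undoes the 24 added above for rescaled
   * lanes); widened to double one 128-bit half at a time. */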
  __m256i e_int = _mm256_sub_epi32(_mm256_srli_epi32( (__m256i)a_compute, 23), exp_offset);

  __m128i e_int_hi = _mm256_extracti128_si256(e_int, 1);
  __m128i e_int_lo = _mm256_extracti128_si256(e_int, 0);

  __m256d e_hi = _mm256_cvtepi32_pd(e_int_hi);
  __m256d e_lo = _mm256_cvtepi32_pd(e_int_lo);

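  /* Collect the lanes that need the special-case path: a == 1.0, b == 0,
   * an inf/NaN in either operand (a_compute + b then has an all-ones
   * exponent), or a <= 0. */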
  __m256 detect_inf_nan = _mm256_add_ps(a_compute, b);
  __m256i overridemask = _mm256_cmpeq_epi32( (__m256i)a_compute, (__m256i)ONE_F_VEC);
  overridemask = _mm256_or_si256( overridemask, (__m256i)_mm256_cmp_ps( b, ZERO_F_VEC, _CMP_EQ_OQ));
  overridemask = _mm256_or_si256( overridemask, _mm256_cmpeq_epi32( _mm256_and_si256((__m256i)detect_inf_nan, ALL_ONES_EXPONENT), (__m256i)ALL_ONES_EXPONENT));
  overridemask = _mm256_or_si256( overridemask, (__m256i)_mm256_cmp_ps(a, ZERO_F_VEC, _CMP_LE_OQ));
  int reducedMask = _mm256_movemask_epi8(overridemask);

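  /* Mantissa argument for the log2 polynomial: keep the mantissa bits of
   * a_compute and attach a fixed exponent so m falls in the fpminimax
   * domain [0.5, 1) quoted above, then widen to double per half. */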
  __m256 m = (__m256)_mm256_add_epi32(_mm256_and_si256( (__m256i)a_compute, bit_mask2), offset);
  __m128 m_hi_f = _mm256_extractf128_ps(m, 1);
  __m128 m_lo_f = _mm256_extractf128_ps(m, 0);
  __m256d m_hi = _mm256_cvtps_pd(m_hi_f);
  __m256d m_lo = _mm256_cvtps_pd(m_lo_f);

  // __m256d t_hi = LOG_C0_VEC;
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C1_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C2_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C3_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C4_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C5_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C6_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C7_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C8_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C9_VEC);
  // t_hi = _mm256_fmadd_pd(t_hi, m_hi, LOG_C10_VEC);

  // __m256d t_lo = LOG_C0_VEC;
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C1_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C2_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C3_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C4_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C5_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C6_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C7_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C8_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C9_VEC);
  // t_lo = _mm256_fmadd_pd(t_lo, m_lo, LOG_C10_VEC);

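  /* The live code below evaluates the same degree-10 polynomial as the
   * Horner form kept above, but in an Estrin-style grouping whose
   * independent FMAs shorten the dependency chain. */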
  __m256d m2_hi = _mm256_mul_pd(m_hi, m_hi);
  __m256d m4_hi = _mm256_mul_pd(m2_hi, m2_hi);

  __m256d a1_hi = _mm256_fmadd_pd(m_hi, LOG_C9_VEC, LOG_C10_VEC);
  __m256d a2_hi = _mm256_fmadd_pd(m_hi, LOG_C7_VEC, LOG_C8_VEC);
  __m256d a3_hi = _mm256_fmadd_pd(m_hi, LOG_C5_VEC, LOG_C6_VEC);
  __m256d a4_hi = _mm256_fmadd_pd(m_hi, LOG_C3_VEC, LOG_C4_VEC);
  __m256d a5_hi = _mm256_fmadd_pd(m_hi, LOG_C1_VEC, LOG_C2_VEC);
  __m256d a6_hi = _mm256_mul_pd(LOG_C0_VEC, m2_hi);

  __m256d a7_hi = _mm256_fmadd_pd(m2_hi, a2_hi, a1_hi);
  __m256d a8_hi = _mm256_fmadd_pd(m2_hi, a4_hi, a3_hi);
  __m256d a9_hi = _mm256_add_pd(a5_hi, a6_hi);

  __m256d a10_hi = _mm256_fmadd_pd(m4_hi, a9_hi, a8_hi);
  __m256d t_hi = _mm256_fmadd_pd(m4_hi, a10_hi, a7_hi);

  __m256d m2_lo = _mm256_mul_pd(m_lo, m_lo);
  __m256d m4_lo = _mm256_mul_pd(m2_lo, m2_lo);

  __m256d a6_lo = _mm256_mul_pd(LOG_C0_VEC, m2_lo);
  __m256d a1_lo = _mm256_fmadd_pd(m_lo, LOG_C9_VEC, LOG_C10_VEC);
  __m256d a2_lo = _mm256_fmadd_pd(m_lo, LOG_C7_VEC, LOG_C8_VEC);
  __m256d a3_lo = _mm256_fmadd_pd(m_lo, LOG_C5_VEC, LOG_C6_VEC);
  __m256d a4_lo = _mm256_fmadd_pd(m_lo, LOG_C3_VEC, LOG_C4_VEC);
  __m256d a5_lo = _mm256_fmadd_pd(m_lo, LOG_C1_VEC, LOG_C2_VEC);

  __m256d a7_lo = _mm256_fmadd_pd(m2_lo, a2_lo, a1_lo);
  __m256d a8_lo = _mm256_fmadd_pd(m2_lo, a4_lo, a3_lo);
  __m256d a9_lo = _mm256_add_pd(a5_lo, a6_lo);

  __m256d a10_lo = _mm256_fmadd_pd(m4_lo, a9_lo, a8_lo);

  __m256d t_lo = _mm256_fmadd_pd(m4_lo, a10_lo, a7_lo);

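  /* log2(|a|) = e + p(m); multiplying by b (in double precision) gives the
   * argument for the 2^x evaluation below. */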
  t_lo = _mm256_add_pd(e_lo, t_lo);
  t_hi = _mm256_add_pd(e_hi, t_hi);

  __m256d temp_hi = _mm256_mul_pd(b_hi_d, t_hi);
  __m256d temp_lo = _mm256_mul_pd(b_lo_d, t_lo);

  //---------exponent starts here
  // __m256i exp_override = (__m256i)_mm256_cmp_pd( temp_hi, EXP_HI_VEC, _CMP_GT_OS);
  // exp_override = _mm256_or_si256(exp_override, (__m256i)_mm256_cmp_pd(temp_lo, EXP_HI_VEC, _CMP_GT_OS));
  // exp_override = _mm256_or_si256(exp_override, (__m256i)_mm256_cmp_pd(temp_hi, EXP_LO_VEC, _CMP_LT_OS));
  // exp_override = _mm256_or_si256(exp_override, (__m256i)_mm256_cmp_pd(temp_lo, EXP_LO_VEC, _CMP_LT_OS));
  // int exp_reduced_mask = _mm256_movemask_epi8(exp_override);
  // if (exp_reduced_mask) {
  //   return pow_vec256_dp_slowpath(a, b);
  // }

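  /* Clamp b*log2(|a|) to the [EXP_LO, EXP_HI] range handled by the
   * exponent-scaling trick below (the commented-out slow-path dispatch
   * above is the alternative this replaces). */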
  temp_hi = _mm256_min_pd(temp_hi, EXP_HI_VEC);
  temp_hi = _mm256_max_pd(temp_hi, EXP_LO_VEC);
  temp_lo = _mm256_min_pd(temp_lo, EXP_HI_VEC);
  temp_lo = _mm256_max_pd(temp_lo, EXP_LO_VEC);

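  /* Split temp into integer and fractional parts: adding DBL2INT_CVT leaves
   * the rounded integer in the low bits of the double (integer_hi/lo), and
   * subtracting it back recovers the rounded value, so z_exp = temp - tt is
   * the remainder in roughly [-0.5, 0.5] for the 2^x polynomial. */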
  __m256d t_exp_hi = _mm256_add_pd(temp_hi, DBL2INT_CVT_VEC);
  __m256d t_exp_lo = _mm256_add_pd(temp_lo, DBL2INT_CVT_VEC);

  __m256d tt_hi = _mm256_sub_pd(t_exp_hi, DBL2INT_CVT_VEC);
  __m256i integer_hi = _mm256_castpd_si256(t_exp_hi);
  __m256d tt_lo = _mm256_sub_pd(t_exp_lo, DBL2INT_CVT_VEC);
  __m256i integer_lo = _mm256_castpd_si256(t_exp_lo);

  __m256d z_exp_hi = _mm256_sub_pd( temp_hi, tt_hi);
  __m256d z_exp_lo = _mm256_sub_pd( temp_lo, tt_lo);

  __m256d poly_exp_hi;
  poly_exp_hi = EXP_C0_VEC;
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C1_VEC);
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C2_VEC);
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C3_VEC);
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C4_VEC);
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C5_VEC);
  poly_exp_hi = _mm256_fmadd_pd(poly_exp_hi, z_exp_hi, EXP_C6_VEC);

  __m256d poly_exp_lo;
  poly_exp_lo = EXP_C0_VEC;
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C1_VEC);
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C2_VEC);
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C3_VEC);
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C4_VEC);
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C5_VEC);
  poly_exp_lo = _mm256_fmadd_pd(poly_exp_lo, z_exp_lo, EXP_C6_VEC);

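  /* Scale by 2^integer: shift the integer part into the exponent field of
   * the double (bit 52 upward) and add it to the bit pattern of the
   * polynomial result, then narrow back to single precision and reassemble
   * the 256-bit result from the two halves. */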
  __m256i integer_poly_exp_hi = _mm256_castpd_si256(poly_exp_hi);
  __m256i integer_poly_exp_lo = _mm256_castpd_si256(poly_exp_lo);
  integer_hi = _mm256_slli_epi64(integer_hi, 52);
  integer_lo = _mm256_slli_epi64(integer_lo, 52);
  integer_poly_exp_hi = _mm256_add_epi32(integer_hi, integer_poly_exp_hi);
  integer_poly_exp_lo = _mm256_add_epi32(integer_lo, integer_poly_exp_lo);

  __m128 res_hi_f = _mm256_cvtpd_ps((__m256d)integer_poly_exp_hi);
  __m128 res_lo_f = _mm256_cvtpd_ps((__m256d)integer_poly_exp_lo);
  __m256 res_exp;
  res_exp = _mm256_castps128_ps256(res_lo_f);
  res_exp = _mm256_insertf128_ps(res_exp, res_hi_f, 1);

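  /* Rare lanes (a <= 0, b == 0, a == 1, inf/NaN operands) are expected to
   * be absent, hence the __builtin_expect hint; when present they are
   * patched by the special-case routine. */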
  if (__builtin_expect(reducedMask, 0)) {
    return __pgm_pow_vec256_dp_special_cases(res_exp, a, b);
  }
  return res_exp;
}